Fix/5205 #5206

Merged
merged 4 commits into from
Sep 20, 2024
117 changes: 35 additions & 82 deletions stackslib/src/net/p2p.rs
@@ -3560,8 +3560,8 @@ impl PeerNetwork {
         let prune = if cur_epoch.epoch_id >= StacksEpochId::Epoch30 {
             debug!("{:?}: run Nakamoto work loop", self.get_local_peer());

-            // in Nakamoto epoch, so do Nakamoto things
-            let prune = self.do_network_work_nakamoto(
+            // in Nakamoto epoch, so we can always prune
+            self.do_network_work_nakamoto(
                 burnchain_height,
                 sortdb,
                 chainstate,
@@ -3593,9 +3593,10 @@
                     "{:?}: ran Epoch 2.x work loop in Nakamoto epoch",
                     self.get_local_peer()
                 );
-                prune || epoch2_prune
+                epoch2_prune
             } else {
-                prune
+                // we can always prune in Nakamoto, since all state machines pin their connections
+                true
             }
         } else {
             // in epoch 2.x, so do epoch 2.x things
@@ -3623,89 +3624,41 @@
         chainstate: &mut StacksChainState,
         ibd: bool,
         network_result: &mut NetworkResult,
-    ) -> bool {
-        // do some Actual Work(tm)
-        let mut do_prune = false;
-        let mut did_cycle = false;
-
-        while !did_cycle {
-            // always do an inv sync
-            let learned = self.do_network_inv_sync_nakamoto(sortdb, ibd);
-            debug!(
-                "{:?}: network work state is {:?}",
-                self.get_local_peer(),
-                &self.nakamoto_work_state;
-                "learned_new_blocks?" => learned
-            );
-
-            // always do block download
-            let new_blocks = self
-                .do_network_block_sync_nakamoto(burnchain_height, sortdb, chainstate, ibd)
-                .map_err(|e| {
-                    warn!(
-                        "{:?}: Failed to perform Nakamoto block sync: {:?}",
-                        &self.get_local_peer(),
-                        &e
-                    );
-                    e
-                })
-                .unwrap_or(HashMap::new());
-
-            network_result.consume_nakamoto_blocks(new_blocks);
-
-            let cur_state = self.nakamoto_work_state;
-            match self.nakamoto_work_state {
-                PeerNetworkWorkState::GetPublicIP => {
-                    if cfg!(test) && self.connection_opts.disable_natpunch {
-                        self.nakamoto_work_state = PeerNetworkWorkState::BlockDownload;
-                    } else {
-                        // (re)determine our public IP address
-                        let done = self.do_get_public_ip();
-                        if done {
-                            self.nakamoto_work_state = PeerNetworkWorkState::BlockDownload;
-                        }
-                    }
-                }
-                PeerNetworkWorkState::BlockInvSync => {
-                    // this state is useless in Nakamoto since we're always doing inv-syncs
-                    self.nakamoto_work_state = PeerNetworkWorkState::BlockDownload;
-                }
-                PeerNetworkWorkState::BlockDownload => {
-                    // this state is useless in Nakamoto since we're always doing download-syncs
-                    self.nakamoto_work_state = PeerNetworkWorkState::AntiEntropy;
-                }
-                PeerNetworkWorkState::AntiEntropy => {
-                    debug!(
-                        "{:?}: Block anti-entropy for Nakamoto is not yet implemented",
-                        self.get_local_peer()
-                    );
-                    self.nakamoto_work_state = PeerNetworkWorkState::Prune;
-                }
-                PeerNetworkWorkState::Prune => {
-                    // did one pass
-                    did_cycle = true;
-                    do_prune = true;
-
-                    // restart
-                    self.nakamoto_work_state = PeerNetworkWorkState::GetPublicIP;
-                }
-            }
-
-            if self.nakamoto_work_state == cur_state {
-                // only break early if we can't make progress
-                break;
-            }
-        }
-
-        if did_cycle {
-            self.num_state_machine_passes += 1;
-            debug!(
-                "{:?}: Finished full p2p state-machine pass for Nakamoto ({})",
-                &self.local_peer, self.num_state_machine_passes
-            );
-        }
-
-        do_prune
+    ) {
+        // always do an inv sync
+        let learned = self.do_network_inv_sync_nakamoto(sortdb, ibd);
+        debug!(
+            "{:?}: network work state is {:?}",
+            self.get_local_peer(),
+            &self.nakamoto_work_state;
+            "learned_new_blocks?" => learned
+        );
+
+        // always do block download
+        let new_blocks = self
+            .do_network_block_sync_nakamoto(burnchain_height, sortdb, chainstate, ibd)
+            .map_err(|e| {
+                warn!(
+                    "{:?}: Failed to perform Nakamoto block sync: {:?}",
+                    &self.get_local_peer(),
+                    &e
+                );
+                e
+            })
+            .unwrap_or(HashMap::new());
+
+        network_result.consume_nakamoto_blocks(new_blocks);
+
+        // make sure our public IP is fresh (this self-throttles if we recently learned it).
+        if !self.connection_opts.disable_natpunch {
+            self.do_get_public_ip();
+        }
+
+        self.num_state_machine_passes += 1;
+        debug!(
+            "{:?}: Finished full p2p state-machine pass for Nakamoto ({})",
+            &self.local_peer, self.num_state_machine_passes
+        );
     }

     /// Do the actual work in the state machine.
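The net effect of the p2p.rs change is easier to see with the caller's branches laid side by side. Below is a minimal paraphrase of the new control flow, not the upstream code (it collapses the Epoch 2.x work loops into a single `epoch2_prune` input for brevity): in the Nakamoto epoch the prune decision no longer comes back from `do_network_work_nakamoto`; it is either the Epoch 2.x result, when 2.x work still runs during the transition, or unconditionally `true`, since all Nakamoto state machines pin their connections.

```rust
// Hypothetical paraphrase of the prune decision after this PR; `should_prune`
// and its parameters are illustrative, not identifiers from stackslib.
fn should_prune(
    in_nakamoto_epoch: bool,
    still_doing_epoch2_work: bool,
    epoch2_prune: bool,
) -> bool {
    if in_nakamoto_epoch {
        if still_doing_epoch2_work {
            // ran the Epoch 2.x work loop in the Nakamoto epoch
            epoch2_prune
        } else {
            // we can always prune in Nakamoto, since all state machines pin their connections
            true
        }
    } else {
        // in epoch 2.x, the prune decision comes from the Epoch 2.x work loop
        epoch2_prune
    }
}

fn main() {
    assert!(should_prune(true, false, false));
    assert!(!should_prune(true, true, false));
    println!("Nakamoto branch always prunes unless deferring to Epoch 2.x work");
}
```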
11 changes: 11 additions & 0 deletions testnet/stacks-node/src/nakamoto_node/miner.rs
@@ -60,6 +60,8 @@ use crate::neon_node;
 use crate::run_loop::nakamoto::Globals;
 use crate::run_loop::RegisteredKey;

+#[cfg(test)]
+pub static TEST_MINE_STALL: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None);
 #[cfg(test)]
 pub static TEST_BROADCAST_STALL: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None);
 #[cfg(test)]
@@ -291,6 +293,15 @@ impl BlockMinerThread {
         let mut attempts = 0;
         // now, actually run this tenure
         loop {
+            #[cfg(test)]
+            if *TEST_MINE_STALL.lock().unwrap() == Some(true) {
+                // Do an extra check just so we don't log EVERY time.
+                warn!("Mining is stalled due to testing directive");
+                while *TEST_MINE_STALL.lock().unwrap() == Some(true) {
+                    std::thread::sleep(std::time::Duration::from_millis(10));
+                }
+                warn!("Mining is no longer stalled due to testing directive. Continuing...");
+            }
             let new_block = loop {
                 // If we're mock mining, we may not have processed the block that the
                 // actual tenure winner committed to yet. So, before attempting to
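For readers skimming the miner change: `TEST_MINE_STALL` follows the same test-only stall-flag pattern as the existing `TEST_BROADCAST_STALL`. The sketch below is a self-contained illustration of that pattern, assuming made-up names (`STALL`, `worker_step`) rather than stacks-node APIs: the test side flips a `static Mutex<Option<bool>>`, and the worker polls it at the top of each iteration, sleeping while it reads `Some(true)`.

```rust
use std::sync::Mutex;
use std::thread;
use std::time::Duration;

// Illustrative stand-in for a flag like TEST_MINE_STALL.
static STALL: Mutex<Option<bool>> = Mutex::new(None);

fn worker_step(i: u32) {
    // Check once before the wait loop so we only log the stall a single time.
    if *STALL.lock().unwrap() == Some(true) {
        println!("worker stalled by test directive");
        while *STALL.lock().unwrap() == Some(true) {
            thread::sleep(Duration::from_millis(10));
        }
        println!("worker resumed");
    }
    println!("worker did step {i}");
}

fn main() {
    // "Test" side: hold the worker, let it spin briefly, then release it.
    STALL.lock().unwrap().replace(true);
    let handle = thread::spawn(|| {
        for i in 0..3 {
            worker_step(i);
        }
    });
    thread::sleep(Duration::from_millis(50));
    STALL.lock().unwrap().replace(false);
    handle.join().unwrap();
}
```

The `Option<bool>` (rather than a plain `bool`) mirrors the PR's statics: `None` means no test directive is in effect, so non-test runs never pause.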
25 changes: 22 additions & 3 deletions testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -94,7 +94,9 @@ use wsts::net::Message;

 use super::bitcoin_regtest::BitcoinCoreController;
 use crate::config::{EventKeyType, EventObserverConfig, InitialBalance};
-use crate::nakamoto_node::miner::{TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL};
+use crate::nakamoto_node::miner::{
+    TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL,
+};
 use crate::neon::{Counters, RunLoopCounter};
 use crate::operations::BurnchainOpSigner;
 use crate::run_loop::boot_nakamoto;
@@ -5186,6 +5188,8 @@ fn clarity_burn_state() {
         );
         result.expect_result_ok().expect("Read-only call failed");

+        // Pause mining to prevent the stacks block from being mined before the tenure change is processed
+        TEST_MINE_STALL.lock().unwrap().replace(true);
         // Submit a tx for the next block (the next block will be a new tenure, so the burn block height will increment)
         let call_tx = tests::make_contract_call(
             &sender_sk,
@@ -5201,8 +5205,23 @@
         }

         let commits_before = commits_submitted.load(Ordering::SeqCst);
-        next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel)
-            .unwrap();
+        let blocks_processed_before = coord_channel
+            .lock()
+            .expect("Mutex poisoned")
+            .get_stacks_blocks_processed();
+        next_block_and(&mut btc_regtest_controller, 60, || {
+            Ok(commits_submitted.load(Ordering::SeqCst) > commits_before)
+        })
+        .unwrap();
+        TEST_MINE_STALL.lock().unwrap().replace(false);
+        wait_for(20, || {
+            Ok(coord_channel
+                .lock()
+                .expect("Mutex poisoned")
+                .get_stacks_blocks_processed()
+                > blocks_processed_before)
+        })
+        .unwrap();

         let info = get_chain_info(&naka_conf);
         burn_block_height = info.burn_block_height as u128;
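The reworked assertion above relies on the test harness's `next_block_and` and `wait_for` helpers, which repeatedly evaluate a closure until it returns `Ok(true)` or a timeout expires. The sketch below shows the general shape of such a helper under an assumed name (`wait_for_condition`); the real helpers live in the stacks-node test code and may differ in polling interval and error handling.

```rust
use std::thread;
use std::time::{Duration, Instant};

// Poll `check` until it returns Ok(true), propagating its errors and failing
// after `timeout_secs`. Assumed signature, modeled on the test's wait_for usage.
fn wait_for_condition<F>(timeout_secs: u64, mut check: F) -> Result<(), String>
where
    F: FnMut() -> Result<bool, String>,
{
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    loop {
        if check()? {
            return Ok(());
        }
        if Instant::now() >= deadline {
            return Err("Timed out waiting for condition".into());
        }
        thread::sleep(Duration::from_millis(100));
    }
}

fn main() {
    // Example: wait until a counter passes a threshold, analogous to comparing
    // get_stacks_blocks_processed() against blocks_processed_before.
    let mut polls = 0;
    wait_for_condition(5, || {
        polls += 1;
        Ok(polls > 3)
    })
    .unwrap();
    println!("condition reached after {polls} polls");
}
```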