diff --git a/Cargo.lock b/Cargo.lock index cbd38bcf943..0e21ccf9b1f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16,21 +16,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "addr2line" -version = "0.25.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" - [[package]] name = "aead" version = "0.6.0-rc.2" @@ -135,15 +120,6 @@ dependencies = [ "windows-sys 0.60.2", ] -[[package]] -name = "anyhow" -version = "1.0.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" -dependencies = [ - "backtrace", -] - [[package]] name = "anymap2" version = "0.13.0" @@ -372,21 +348,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "backtrace" -version = "0.3.76" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-link 0.2.1", -] - [[package]] name = "base16ct" version = "0.3.0" @@ -473,12 +434,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "btparse" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "387e80962b798815a2b5c4bcfdb6bf626fa922ffe9f74e373103b858738e9f31" - [[package]] name = "bumpalo" version = "3.19.0" @@ -643,17 +598,6 @@ dependencies = [ "thiserror 2.0.17", ] -[[package]] -name = "color-backtrace" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308329d5d62e877ba02943db3a8e8c052de9fde7ab48283395ba0e6494efbabd" -dependencies = [ - "backtrace", - "btparse", - "termcolor", -] - [[package]] name = "colorchoice" version = "1.0.4" @@ -1418,12 +1362,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "gimli" -version = "0.32.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" - [[package]] name = "gloo" version = "0.8.1" @@ -2203,10 +2141,9 @@ dependencies = [ "iroh-quinn-proto", "iroh-quinn-udp", "iroh-relay", + "n0-error", "n0-future", - "n0-snafu", "n0-watcher", - "nested_enum_utils", "netdev", "netwatch", "parse-size", @@ -2226,7 +2163,6 @@ dependencies = [ "serde", "serde_json", "smallvec", - "snafu", "strum", "swarm-discovery", "time", @@ -2252,8 +2188,7 @@ dependencies = [ "data-encoding", "derive_more 2.0.1", "ed25519-dalek", - "n0-snafu", - "nested_enum_utils", + "n0-error", "postcard", "proptest", "rand 0.9.2", @@ -2262,7 +2197,6 @@ dependencies = [ "serde", "serde_json", "serde_test", - "snafu", "url", "zeroize", "zeroize_derive", @@ -2278,8 +2212,8 @@ dependencies = [ "iroh", "iroh-metrics", "iroh-quinn", + "n0-error", "n0-future", - "n0-snafu", "rand 0.9.2", "rcgen", "rustls", @@ -2311,8 +2245,8 @@ dependencies = [ "iroh", "iroh-metrics", "lru 0.16.2", + "n0-error", "n0-future", - "n0-snafu", "pkarr", "rand 0.9.2", "rand_chacha 0.9.0", @@ -2322,7 +2256,6 @@ dependencies = [ "rustls", "rustls-pemfile", "serde", - "snafu", "struct_iterable", "strum", "tokio", @@ -2343,29 +2276,29 @@ dependencies = [ [[package]] name = "iroh-metrics" -version = "0.36.2" +version 
= "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84c167b59ae22f940e78eb347ca5f02aa25608e994cb5a7cc016ac2d5eada18" +checksum = "79e3381da7c93c12d353230c74bba26131d1c8bf3a4d8af0fec041546454582e" dependencies = [ "http-body-util", "hyper", "hyper-util", "iroh-metrics-derive", "itoa", + "n0-error", "postcard", "reqwest", "ryu", "serde", - "snafu", "tokio", "tracing", ] [[package]] name = "iroh-metrics-derive" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "748d380f26f7c25307c0a7acd181b84b977ddc2a1b7beece1e5998623c323aa1" +checksum = "d4e12bd0763fd16062f5cc5e8db15dd52d26e75a8af4c7fb57ccee3589b344b8" dependencies = [ "heck", "proc-macro2", @@ -2452,9 +2385,8 @@ dependencies = [ "iroh-quinn", "iroh-quinn-proto", "lru 0.16.2", + "n0-error", "n0-future", - "n0-snafu", - "nested_enum_utils", "num_enum", "pin-project", "pkarr", @@ -2475,7 +2407,6 @@ dependencies = [ "serde_json", "sha1", "simdutf8", - "snafu", "strum", "time", "tokio", @@ -2706,15 +2637,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" -dependencies = [ - "adler2", -] - [[package]] name = "mio" version = "1.1.0" @@ -2744,6 +2666,29 @@ dependencies = [ "uuid", ] +[[package]] +name = "n0-error" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a4839a11b62f1fdd75be912ee20634053c734c2240e867ded41c7f50822c549" +dependencies = [ + "derive_more 2.0.1", + "n0-error-macros", + "spez", +] + +[[package]] +name = "n0-error-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed2a7e5ca3cb5729d4a162d7bcab5b338bed299a2fee8457568d7e0a747ed89" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "n0-future" version = "0.3.0" @@ -2765,40 +2710,15 @@ dependencies = [ "web-time", ] -[[package]] -name = "n0-snafu" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1815107e577a95bfccedb4cfabc73d709c0db6d12de3f14e0f284a8c5036dc4f" -dependencies = [ - "anyhow", - "btparse", - "color-backtrace", - "snafu", - "tracing-error", -] - [[package]] name = "n0-watcher" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34c65e127e06e5a2781b28df6a33ea474a7bddc0ac0cfea888bd20c79a1b6516" +checksum = "38acf13c1ddafc60eb7316d52213467f8ccb70b6f02b65e7d97f7799b1f50be4" dependencies = [ "derive_more 2.0.1", + "n0-error", "n0-future", - "snafu", -] - -[[package]] -name = "nested_enum_utils" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d5475271bdd36a4a2769eac1ef88df0f99428ea43e52dfd8b0ee5cb674695f" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", ] [[package]] @@ -2868,9 +2788,9 @@ dependencies = [ [[package]] name = "netwatch" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98d7ec7abdbfe67ee70af3f2002326491178419caea22254b9070e6ff0c83491" +checksum = "26f2acd376ef48b6c326abf3ba23c449e0cb8aa5c2511d189dd8a8a3bfac889b" dependencies = [ "atomic-waker", "bytes", @@ 
-2879,9 +2799,9 @@ dependencies = [ "iroh-quinn-udp", "js-sys", "libc", + "n0-error", "n0-future", "n0-watcher", - "nested_enum_utils", "netdev", "netlink-packet-core", "netlink-packet-route", @@ -2889,7 +2809,6 @@ dependencies = [ "netlink-sys", "pin-project-lite", "serde", - "snafu", "socket2 0.6.1", "time", "tokio", @@ -3012,15 +2931,6 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.37.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" -dependencies = [ - "memchr", -] - [[package]] name = "oid-registry" version = "0.8.1" @@ -3254,9 +3164,9 @@ checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "portmapper" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d73aa9bd141e0ff6060fea89a5437883f3b9ceea1cda71c790b90e17d072a3b3" +checksum = "7b575f975dcf03e258b0c7ab3f81497d7124f508884c37da66a7314aa2a8d467" dependencies = [ "base64", "bytes", @@ -3267,13 +3177,12 @@ dependencies = [ "igd-next", "iroh-metrics", "libc", - "nested_enum_utils", + "n0-error", "netwatch", "num_enum", "rand 0.9.2", "serde", "smallvec", - "snafu", "socket2 0.6.1", "time", "tokio", @@ -3727,12 +3636,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rustc-demangle" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" - [[package]] name = "rustc-hash" version = "2.1.1" @@ -4210,28 +4113,6 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fad6c857cbab2627dcf01ec85a623ca4e7dcb5691cbaa3d7fb7653671f0d09c9" -[[package]] -name = "snafu" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e84b3f4eacbf3a1ce05eac6763b4d629d60cbc94d632e4092c54ade71f1e1a2" -dependencies = [ - "backtrace", - "snafu-derive", -] - -[[package]] -name = "snafu-derive" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c97747dbf44bb1ca44a561ece23508e99cb592e862f22222dcf42f51d1e451" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "socket2" version = "0.5.10" @@ -4252,6 +4133,17 @@ dependencies = [ "windows-sys 0.60.2", ] +[[package]] +name = "spez" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c87e960f4dca2788eeb86bbdde8dd246be8948790b7618d656e68f9b720a86e8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "spin" version = "0.9.8" @@ -4440,15 +4332,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - [[package]] name = "thiserror" version = "1.0.69" @@ -4829,16 +4712,6 @@ dependencies = [ "valuable", ] -[[package]] -name = "tracing-error" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" -dependencies = [ - "tracing", - "tracing-subscriber", -] - [[package]] name = "tracing-log" version = "0.2.0" diff --git a/iroh-base/Cargo.toml b/iroh-base/Cargo.toml index 8cc6dae640f..5dc3d0d7c98 100644 --- 
a/iroh-base/Cargo.toml +++ b/iroh-base/Cargo.toml @@ -22,9 +22,7 @@ derive_more = { version = "2.0.1", features = ["display"], optional = true } url = { version = "2.5.3", features = ["serde"], optional = true } rand_core = { version = "0.9.3", optional = true } serde = { version = "1", features = ["derive", "rc"] } -snafu = { version = "0.8.5", features = ["rust_1_81"], optional = true } -n0-snafu = "0.2.2" -nested_enum_utils = "0.2.0" +n0-error = "0.1.0" zeroize = { version = "1.8.2", optional = true, features = ["derive"] } zeroize_derive = { version = "1.4.2", optional = true } # needed for minimal versions @@ -44,7 +42,6 @@ key = [ "dep:ed25519-dalek", "dep:url", "dep:derive_more", - "dep:snafu", "dep:data-encoding", "dep:rand_core", "dep:zeroize", @@ -54,7 +51,6 @@ key = [ relay = [ "dep:url", "dep:derive_more", - "dep:snafu", ] [package.metadata.docs.rs] diff --git a/iroh-base/src/key.rs b/iroh-base/src/key.rs index 67328c10c68..9cb7b01bdd6 100644 --- a/iroh-base/src/key.rs +++ b/iroh-base/src/key.rs @@ -11,10 +11,9 @@ use std::{ use curve25519_dalek::edwards::CompressedEdwardsY; use ed25519_dalek::{SigningKey, VerifyingKey}; -use nested_enum_utils::common_fields; +use n0_error::{ensure, stack_error}; use rand_core::CryptoRng; use serde::{Deserialize, Serialize, de, ser}; -use snafu::{Backtrace, Snafu}; /// A public key. /// @@ -128,7 +127,7 @@ impl PublicKey { pub fn verify(&self, message: &[u8], signature: &Signature) -> Result<(), SignatureError> { self.as_verifying_key() .verify_strict(message, &signature.0) - .map_err(|_| SignatureSnafu.build()) + .map_err(|_| SignatureError::new()) } /// Convert to a hex string limited to the first 5 bytes for a friendly string @@ -204,26 +203,18 @@ impl Display for PublicKey { } /// Error when deserialising a [`PublicKey`] or a [`SecretKey`]. -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Snafu, Debug)] +#[stack_error(derive, add_meta, from_sources, std_sources)] #[allow(missing_docs)] -#[snafu(visibility(pub(crate)))] pub enum KeyParsingError { /// Error when decoding. - #[snafu(transparent)] - Decode { source: data_encoding::DecodeError }, + #[error(transparent)] + Decode(data_encoding::DecodeError), /// Error when decoding the public key. - #[snafu(transparent)] - Key { - source: ed25519_dalek::SignatureError, - }, + #[error(transparent)] + Key(ed25519_dalek::SignatureError), /// The encoded information had the wrong length. - #[snafu(display("invalid length"))] - DecodeInvalidLength {}, + #[error("invalid length")] + DecodeInvalidLength, } /// Deserialises the [`PublicKey`] from it's base32 encoding. @@ -420,9 +411,9 @@ impl Signature { } /// Verification of a signature failed. -#[derive(Debug, Snafu)] -#[snafu(display("Invalid signature"))] -pub struct SignatureError; +#[stack_error(derive, add_meta)] +#[error("Invalid signature")] +pub struct SignatureError {} fn decode_base32_hex(s: &str) -> Result<[u8; 32], KeyParsingError> { let mut bytes = [0u8; 32]; @@ -433,16 +424,18 @@ fn decode_base32_hex(s: &str) -> Result<[u8; 32], KeyParsingError> { } else { let input = s.to_ascii_uppercase(); let input = input.as_bytes(); - if data_encoding::BASE32_NOPAD.decode_len(input.len())? != bytes.len() { - return Err(DecodeInvalidLengthSnafu.build()); - } + ensure!( + data_encoding::BASE32_NOPAD.decode_len(input.len())? 
== bytes.len(), + KeyParsingError::DecodeInvalidLength + ); data_encoding::BASE32_NOPAD.decode_mut(input, &mut bytes) }; match res { Ok(len) => { - if len != PublicKey::LENGTH { - return Err(DecodeInvalidLengthSnafu.build()); - } + ensure!( + len == PublicKey::LENGTH, + KeyParsingError::DecodeInvalidLength + ); } Err(partial) => return Err(partial.error.into()), } diff --git a/iroh-base/src/relay_url.rs b/iroh-base/src/relay_url.rs index b1aef365282..5925a39f092 100644 --- a/iroh-base/src/relay_url.rs +++ b/iroh-base/src/relay_url.rs @@ -1,7 +1,7 @@ use std::{fmt, ops::Deref, str::FromStr, sync::Arc}; +use n0_error::stack_error; use serde::{Deserialize, Serialize}; -use snafu::{Backtrace, ResultExt, Snafu}; use url::Url; /// A URL identifying a relay server. @@ -39,12 +39,9 @@ impl From for RelayUrl { } /// Can occur when parsing a string into a [`RelayUrl`]. -#[derive(Debug, Snafu)] -#[snafu(display("Failed to parse"))] -pub struct RelayUrlParseError { - source: url::ParseError, - backtrace: Option, -} +#[stack_error(derive, add_meta)] +#[error("Failed to parse relay URL")] +pub struct RelayUrlParseError(#[error(std_err)] url::ParseError); /// Support for parsing strings directly. /// @@ -54,7 +51,7 @@ impl FromStr for RelayUrl { type Err = RelayUrlParseError; fn from_str(s: &str) -> Result { - let inner = Url::from_str(s).context(RelayUrlParseSnafu)?; + let inner = Url::from_str(s).map_err(RelayUrlParseError::new)?; Ok(RelayUrl::from(inner)) } } diff --git a/iroh-dns-server/Cargo.toml b/iroh-dns-server/Cargo.toml index b1e8402b191..51eedeb3a69 100644 --- a/iroh-dns-server/Cargo.toml +++ b/iroh-dns-server/Cargo.toml @@ -28,10 +28,9 @@ hickory-server = { version = "0.25.1", features = ["https-ring"] } http = "1.0.0" humantime = "2.2.0" humantime-serde = "1.1.1" -iroh-metrics = { version = "0.36", features = ["service"] } +iroh-metrics = { version = "0.37", features = ["service"] } lru = "0.16" n0-future = "0.3.0" -n0-snafu = "0.2.2" pkarr = { version = "5", features = ["relays", "dht"], default-features = false } rcgen = "0.14" redb = "3.1.0" @@ -40,7 +39,7 @@ rustls = { version = "0.23.33", default-features = false, features = ["ring"] } rustls-pemfile = { version = "2.1" } serde = { version = "1", features = ["derive"] } struct_iterable = "0.1.1" -snafu = { version = "0.8.5", features = ["rust_1_81"] } +n0-error = "0.1.0" strum = { version = "0.27", features = ["derive"] } tokio = { version = "1", features = ["full"] } tokio-rustls = { version = "0.26", default-features = false, features = [ diff --git a/iroh-dns-server/benches/write.rs b/iroh-dns-server/benches/write.rs index 2e66db59578..9373c1bfb95 100644 --- a/iroh-dns-server/benches/write.rs +++ b/iroh-dns-server/benches/write.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use iroh::{SecretKey, discovery::pkarr::PkarrRelayClient, endpoint_info::EndpointInfo}; use iroh_dns_server::{ZoneStore, config::Config, metrics::Metrics, server::Server}; -use n0_snafu::Result; +use n0_error::Result; use rand_chacha::rand_core::SeedableRng; use tokio::runtime::Runtime; diff --git a/iroh-dns-server/examples/convert.rs b/iroh-dns-server/examples/convert.rs index 765c809f16f..1f6a2eb88ed 100644 --- a/iroh-dns-server/examples/convert.rs +++ b/iroh-dns-server/examples/convert.rs @@ -2,7 +2,7 @@ use std::str::FromStr; use clap::Parser; use iroh::EndpointId; -use n0_snafu::{Result, ResultExt}; +use n0_error::{Result, StdResultExt}; #[derive(Debug, Parser)] struct Cli { @@ 
-21,12 +21,12 @@ fn main() -> Result<()> { match args.command { Command::EndpointToPkarr { endpoint_id } => { let endpoint_id = EndpointId::from_str(&endpoint_id)?; - let public_key = pkarr::PublicKey::try_from(endpoint_id.as_bytes()).e()?; + let public_key = pkarr::PublicKey::try_from(endpoint_id.as_bytes()).anyerr()?; println!("{}", public_key.to_z32()) } Command::PkarrToEndpoint { z32_pubkey } => { - let public_key = pkarr::PublicKey::try_from(z32_pubkey.as_str()).e()?; - let endpoint_id = EndpointId::from_bytes(public_key.as_bytes()).e()?; + let public_key = pkarr::PublicKey::try_from(z32_pubkey.as_str()).anyerr()?; + let endpoint_id = EndpointId::from_bytes(public_key.as_bytes())?; println!("{endpoint_id}") } } diff --git a/iroh-dns-server/examples/publish.rs b/iroh-dns-server/examples/publish.rs index 99adec17cc0..d8fd5acffb2 100644 --- a/iroh-dns-server/examples/publish.rs +++ b/iroh-dns-server/examples/publish.rs @@ -10,7 +10,7 @@ use iroh::{ }, endpoint_info::{EndpointIdExt, EndpointInfo, IROH_TXT_NAME}, }; -use n0_snafu::{Result, ResultExt}; +use n0_error::{Result, StackResultExt}; use url::Url; const DEV_PKARR_RELAY_URL: &str = "http://localhost:8080/pkarr"; diff --git a/iroh-dns-server/examples/resolve.rs b/iroh-dns-server/examples/resolve.rs index 2f628585165..e9c536bbcf2 100644 --- a/iroh-dns-server/examples/resolve.rs +++ b/iroh-dns-server/examples/resolve.rs @@ -4,7 +4,7 @@ use iroh::{ discovery::dns::{N0_DNS_ENDPOINT_ORIGIN_PROD, N0_DNS_ENDPOINT_ORIGIN_STAGING}, dns::DnsResolver, }; -use n0_snafu::{Result, ResultExt}; +use n0_error::{Result, StackResultExt, StdResultExt}; const DEV_DNS_SERVER: &str = "127.0.0.1:5300"; const DEV_DNS_ORIGIN_DOMAIN: &str = "irohdns.example"; @@ -49,7 +49,7 @@ async fn main() -> Result<()> { let resolver = if let Some(host) = args.dns_server { let addr = tokio::net::lookup_host(host) .await - .e()? + .anyerr()? 
.next() .context("failed to resolve DNS server address")?; DnsResolver::with_nameserver(addr) diff --git a/iroh-dns-server/src/config.rs b/iroh-dns-server/src/config.rs index 867d89e07d5..9f602990099 100644 --- a/iroh-dns-server/src/config.rs +++ b/iroh-dns-server/src/config.rs @@ -7,7 +7,7 @@ use std::{ time::Duration, }; -use n0_snafu::{Result, ResultExt}; +use n0_error::{Result, StdResultExt}; use serde::{Deserialize, Serialize}; use tracing::info; @@ -162,8 +162,8 @@ impl Config { ); let s = tokio::fs::read_to_string(path.as_ref()) .await - .with_context(|| format!("failed to read {}", path.as_ref().to_string_lossy()))?; - let config: Config = toml::from_str(&s).e()?; + .with_std_context(|_| format!("failed to read {}", path.as_ref().to_string_lossy()))?; + let config: Config = toml::from_str(&s).anyerr()?; Ok(config) } @@ -173,7 +173,7 @@ impl Config { PathBuf::from(val) } else { let path = dirs_next::data_dir() - .context("operating environment provides no directory for application data")?; + .std_context("operating environment provides no directory for application data")?; path.join("iroh-dns") }; diff --git a/iroh-dns-server/src/dns.rs b/iroh-dns-server/src/dns.rs index 38b4ad552e1..f12273df4a6 100644 --- a/iroh-dns-server/src/dns.rs +++ b/iroh-dns-server/src/dns.rs @@ -25,7 +25,7 @@ use hickory_server::{ server::{Request, RequestHandler, ResponseHandler, ResponseInfo}, store::in_memory::InMemoryAuthority, }; -use n0_snafu::{Result, ResultExt, format_err}; +use n0_error::{Result, StdResultExt, anyerr}; use serde::{Deserialize, Serialize}; use tokio::{ net::{TcpListener, UdpSocket}, @@ -82,12 +82,12 @@ impl DnsServer { config.port, ); - let socket = UdpSocket::bind(bind_addr).await.e()?; + let socket = UdpSocket::bind(bind_addr).await.anyerr()?; - let socket_addr = socket.local_addr().e()?; + let socket_addr = socket.local_addr().anyerr()?; server.register_socket(socket); - server.register_listener(TcpListener::bind(bind_addr).await.e()?, TCP_TIMEOUT); + server.register_listener(TcpListener::bind(bind_addr).await.anyerr()?, TCP_TIMEOUT); info!("DNS server listening on {}", bind_addr); Ok(Self { @@ -103,7 +103,7 @@ impl DnsServer { /// Shutdown the server an wait for all tasks to complete. pub async fn shutdown(mut self) -> Result<()> { - self.server.shutdown_gracefully().await.e()?; + self.server.shutdown_gracefully().await.anyerr()?; Ok(()) } @@ -111,7 +111,7 @@ impl DnsServer { /// /// Runs forever unless tasks fail. pub async fn run_until_done(mut self) -> Result<()> { - self.server.block_until_done().await.e()?; + self.server.block_until_done().await.anyerr()?; Ok(()) } } @@ -133,7 +133,7 @@ impl DnsHandler { .iter() .map(Name::from_utf8) .collect::, _>>() - .e()?; + .anyerr()?; let (static_authority, serial) = create_static_authority(&origins, config)?; let authority = Arc::new(NodeAuthority::new( @@ -159,7 +159,7 @@ impl DnsHandler { let (tx, mut rx) = broadcast::channel(1); let response_handle = Handle(tx); self.handle_request(&request, response_handle).await; - rx.recv().await.e() + rx.recv().await.anyerr() } } @@ -234,9 +234,9 @@ fn create_static_authority( config.default_soa.split_ascii_whitespace(), None, ) - .e()? + .anyerr()? 
.into_soa() - .map_err(|_| format_err!("Couldn't parse SOA: {}", config.default_soa))?; + .map_err(|_| anyerr!("Couldn't parse SOA: {}", config.default_soa))?; let serial = soa.serial(); let mut records = BTreeMap::new(); for name in origins { @@ -260,7 +260,7 @@ fn create_static_authority( ); } if let Some(ns) = &config.rr_ns { - let ns = Name::parse(ns, Some(&Name::root())).e()?; + let ns = Name::parse(ns, Some(&Name::root())).anyerr()?; push_record( &mut records, serial, @@ -270,7 +270,7 @@ fn create_static_authority( } let static_authority = InMemoryAuthority::new(Name::root(), records, ZoneType::Primary, false) - .map_err(|e| format_err!("new authority: {e}"))?; + .map_err(|e| anyerr!("new authority: {e}"))?; Ok((static_authority, serial)) } diff --git a/iroh-dns-server/src/dns/node_authority.rs b/iroh-dns-server/src/dns/node_authority.rs index caba6747191..db047b2be5f 100644 --- a/iroh-dns-server/src/dns/node_authority.rs +++ b/iroh-dns-server/src/dns/node_authority.rs @@ -13,8 +13,7 @@ use hickory_server::{ server::RequestInfo, store::in_memory::InMemoryAuthority, }; -use n0_snafu::{Result, ResultExt}; -use snafu::whatever; +use n0_error::{Result, StackResultExt, StdResultExt, bail_any}; use tracing::{debug, trace}; use crate::{ @@ -42,7 +41,7 @@ impl NodeAuthority { serial: u32, ) -> Result { if origins.is_empty() { - whatever!("at least one origin is required"); + bail_any!("at least one origin is required"); } let first_origin = LowerName::from(&origins[0]); Ok(Self { @@ -184,19 +183,19 @@ fn parse_name_as_pkarr_with_origin( continue; } if name.num_labels() < origin.num_labels() + 1 { - whatever!("not a valid pkarr name: missing pubkey"); + bail_any!("not a valid pkarr name: missing pubkey"); } trace!("parse {origin}"); let labels = name.iter().rev(); let mut labels_without_origin = labels.skip(origin.num_labels() as usize); let pkey_label = labels_without_origin.next().expect("length checked above"); - let pkey_str = std::str::from_utf8(pkey_label).e()?; + let pkey_str = std::str::from_utf8(pkey_label).anyerr()?; let pkey = PublicKeyBytes::from_z32(pkey_str).context("not a valid pkarr name: invalid pubkey")?; - let remaining_name = Name::from_labels(labels_without_origin.rev()).e()?; + let remaining_name = Name::from_labels(labels_without_origin.rev()).anyerr()?; return Ok((remaining_name, pkey, origin.clone())); } - whatever!("name does not match any allowed origin"); + bail_any!("name does not match any allowed origin"); } fn err_refused(e: impl fmt::Debug) -> LookupError { diff --git a/iroh-dns-server/src/http.rs b/iroh-dns-server/src/http.rs index a3bdaed3696..cc993ffcb24 100644 --- a/iroh-dns-server/src/http.rs +++ b/iroh-dns-server/src/http.rs @@ -14,9 +14,8 @@ use axum::{ response::IntoResponse, routing::get, }; -use n0_snafu::{Result, ResultExt}; +use n0_error::{Result, StdResultExt, anyerr, bail_any}; use serde::{Deserialize, Serialize}; -use snafu::whatever; use tokio::{net::TcpListener, task::JoinSet}; use tower_http::{ cors::{self, CorsLayer}, @@ -75,7 +74,7 @@ impl HttpServer { state: AppState, ) -> Result { if http_config.is_none() && https_config.is_none() { - whatever!("Either http or https config is required"); + bail_any!("Either http or https config is required"); } let app = create_app(state, &rate_limit_config); @@ -89,8 +88,12 @@ impl HttpServer { config.port, ); let app = app.clone(); - let listener = TcpListener::bind(bind_addr).await.e()?.into_std().e()?; - let bound_addr = listener.local_addr().e()?; + let listener = TcpListener::bind(bind_addr) + 
.await
+                .anyerr()?
+                .into_std()
+                .anyerr()?;
+            let bound_addr = listener.local_addr().anyerr()?;
             let fut = axum_server::from_tcp(listener)
                 .serve(app.into_make_service_with_connect_info::<SocketAddr>());
             info!("HTTP server listening on {bind_addr}");
@@ -112,7 +115,7 @@ impl HttpServer {
                     .join(config.cert_mode.to_string());
                 tokio::fs::create_dir_all(&cache_path)
                     .await
-                    .with_context(|| {
+                    .with_std_context(|_| {
                         format!("failed to create cert cache dir at {cache_path:?}")
                     })?;
                 config
@@ -125,8 +128,12 @@ impl HttpServer {
                 )
                 .await?
             };
-            let listener = TcpListener::bind(bind_addr).await.e()?.into_std().e()?;
-            let bound_addr = listener.local_addr().e()?;
+            let listener = TcpListener::bind(bind_addr)
+                .await
+                .anyerr()?
+                .into_std()
+                .anyerr()?;
+            let bound_addr = listener.local_addr().anyerr()?;
             let fut = axum_server::from_tcp(listener)
                 .acceptor(acceptor)
                 .serve(app.into_make_service_with_connect_info::<SocketAddr>());
@@ -173,11 +180,11 @@ impl HttpServer {
                 Err(err) if err.is_cancelled() => {}
                 Ok(Err(err)) => {
                     warn!(?err, "task failed");
-                    final_res = Err(err).context("task");
+                    final_res = Err(anyerr!(err, "task"));
                 }
                 Err(err) => {
                     warn!(?err, "task panicked");
-                    final_res = Err(err).context("join");
+                    final_res = Err(anyerr!(err, "join"));
                 }
             }
         }
diff --git a/iroh-dns-server/src/http/doh.rs b/iroh-dns-server/src/http/doh.rs
index 570338843c3..984d372853e 100644
--- a/iroh-dns-server/src/http/doh.rs
+++ b/iroh-dns-server/src/http/doh.rs
@@ -16,7 +16,7 @@ use http::{
     HeaderValue, StatusCode,
     header::{CACHE_CONTROL, CONTENT_TYPE},
 };
-use n0_snafu::ResultExt;
+use n0_error::StdResultExt;
 
 use super::error::AppResult;
 use crate::state::AppState;
@@ -32,7 +32,7 @@ pub async fn get(
     DnsRequestQuery(request, accept_type): DnsRequestQuery,
 ) -> AppResult<impl IntoResponse> {
     let message_bytes = state.dns_handler.answer_request(request).await?;
-    let message = proto::op::Message::from_bytes(&message_bytes).e()?;
+    let message = proto::op::Message::from_bytes(&message_bytes).anyerr()?;
 
     let min_ttl = message.answers().iter().map(|rec| rec.ttl()).min();
 
@@ -49,7 +49,7 @@ pub async fn get(
         .insert(CONTENT_TYPE, accept_type.to_header_value());
 
     if let Some(min_ttl) = min_ttl {
-        let maxage = HeaderValue::from_str(&format!("s-maxage={min_ttl}")).e()?;
+        let maxage = HeaderValue::from_str(&format!("s-maxage={min_ttl}")).anyerr()?;
         response.headers_mut().insert(CACHE_CONTROL, maxage);
     }
 
diff --git a/iroh-dns-server/src/http/doh/response.rs b/iroh-dns-server/src/http/doh/response.rs
index 2f706646418..22eeaa5e554 100644
--- a/iroh-dns-server/src/http/doh/response.rs
+++ b/iroh-dns-server/src/http/doh/response.rs
@@ -4,9 +4,8 @@
 // https://github.com/fission-codes/fission-server/blob/394de877fad021260c69fdb1edd7bb4b2f98108c/fission-core/src/dns.rs
 
 use hickory_server::proto;
-use n0_snafu::Result;
+use n0_error::{Result, ensure_any};
 use serde::{Deserialize, Serialize};
-use snafu::ensure_whatever;
 
 #[derive(Debug, Serialize, Deserialize)]
 /// JSON representation of a DNS response
@@ -48,18 +47,18 @@ pub struct DnsResponse {
 impl DnsResponse {
     /// Create a new JSON response from a DNS message
     pub fn from_message(message: proto::op::Message) -> Result<Self> {
-        ensure_whatever!(
-            message.message_type() == proto::op::MessageType::Response,
+        ensure_any!(
+            message.message_type() == proto::op::MessageType::Response,
             "Expected message type to be response"
         );
 
-        ensure_whatever!(
-            message.query_count() == message.queries().len() as u16,
+        ensure_any!(
+            message.query_count() == message.queries().len() as u16,
             "Query count mismatch"
         );
 
-        ensure_whatever!(
-            message.answer_count() == message.answers().len() as u16,
+        ensure_any!(
+            message.answer_count() == message.answers().len() as u16,
             "Answer count mismatch"
         );
 
diff --git a/iroh-dns-server/src/http/error.rs b/iroh-dns-server/src/http/error.rs
index b3516e5043e..f964234c0c3 100644
--- a/iroh-dns-server/src/http/error.rs
+++ b/iroh-dns-server/src/http/error.rs
@@ -49,8 +49,8 @@ impl IntoResponse for AppError {
     }
 }
 
-impl From<n0_snafu::Error> for AppError {
-    fn from(value: n0_snafu::Error) -> Self {
+impl From<n0_error::AnyError> for AppError {
+    fn from(value: n0_error::AnyError) -> Self {
         Self {
             status: StatusCode::INTERNAL_SERVER_ERROR,
             detail: Some(value.to_string()),
diff --git a/iroh-dns-server/src/http/tls.rs b/iroh-dns-server/src/http/tls.rs
index fa43515d1dd..624bf0e44e1 100644
--- a/iroh-dns-server/src/http/tls.rs
+++ b/iroh-dns-server/src/http/tls.rs
@@ -9,10 +9,9 @@ use axum_server::{
     accept::Accept,
     tls_rustls::{RustlsAcceptor, RustlsConfig},
 };
+use n0_error::{Result, StackResultExt, StdResultExt, bail_any};
 use n0_future::{FutureExt, future::Boxed as BoxFuture};
-use n0_snafu::{Result, ResultExt};
 use serde::{Deserialize, Serialize};
-use snafu::whatever;
 use tokio::io::{AsyncRead, AsyncWrite};
 use tokio_rustls_acme::{AcmeConfig, axum::AxumAcceptor, caches::DirCache};
 use tokio_stream::StreamExt;
@@ -76,10 +75,10 @@ impl Acce
 impl TlsAcceptor {
     async fn self_signed(domains: Vec<String>) -> Result<Self> {
         let rcgen::CertifiedKey { cert, signing_key } =
-            rcgen::generate_simple_self_signed(domains).e()?;
+            rcgen::generate_simple_self_signed(domains).anyerr()?;
         let config = RustlsConfig::from_der(vec![cert.der().to_vec()], signing_key.serialize_der())
             .await
-            .e()?;
+            .anyerr()?;
         let acceptor = RustlsAcceptor::new(config);
         Ok(Self::Manual(acceptor))
     }
@@ -87,7 +86,7 @@ impl TlsAcceptor {
     async fn manual(domains: Vec<String>, dir: PathBuf) -> Result<Self> {
         let config = rustls::ServerConfig::builder().with_no_client_auth();
         if domains.len() != 1 {
-            whatever!("Multiple domains in manual mode are not supported");
+            bail_any!("Multiple domains in manual mode are not supported");
         }
 
         let keyname = escape_hostname(&domains[0]);
@@ -97,7 +96,7 @@ impl TlsAcceptor {
         let certs = load_certs(cert_path).await?;
         let secret_key = load_secret_key(key_path).await?;
 
-        let config = config.with_single_cert(certs, secret_key).e()?;
+        let config = config.with_single_cert(certs, secret_key).anyerr()?;
         let config = RustlsConfig::from_config(Arc::new(config));
         let acceptor = RustlsAcceptor::new(config);
         Ok(Self::Manual(acceptor))
@@ -141,10 +140,10 @@ async fn load_certs(
 ) -> Result<Vec<rustls::pki_types::CertificateDer<'static>>> {
     let certfile = tokio::fs::read(filename)
         .await
-        .context("cannot open certificate file")?;
+        .std_context("cannot open certificate file")?;
     let mut reader = std::io::Cursor::new(certfile);
     let certs: Result<Vec<_>, std::io::Error> = rustls_pemfile::certs(&mut reader).collect();
-    let certs = certs.e()?;
+    let certs = certs.anyerr()?;
     Ok(certs)
 }
 
@@ -154,11 +153,13 @@ async fn load_secret_key(
 ) -> Result<rustls::pki_types::PrivateKeyDer<'static>> {
     let keyfile = tokio::fs::read(filename.as_ref())
         .await
-        .context("cannot open secret key file")?;
+        .std_context("cannot open secret key file")?;
     let mut reader = std::io::Cursor::new(keyfile);
 
     loop {
-        match rustls_pemfile::read_one(&mut reader).context("cannot parse secret key .pem file")? {
+        match rustls_pemfile::read_one(&mut reader)
+            .std_context("cannot parse secret key .pem file")?
+ { Some(rustls_pemfile::Item::Pkcs1Key(key)) => { return Ok(rustls::pki_types::PrivateKeyDer::Pkcs1(key)); } @@ -173,7 +174,7 @@ async fn load_secret_key( } } - whatever!( + bail_any!( "no keys found in {} (encrypted keys not supported)", filename.as_ref().display() ); diff --git a/iroh-dns-server/src/lib.rs b/iroh-dns-server/src/lib.rs index 17597273ee0..c3f7475a674 100644 --- a/iroh-dns-server/src/lib.rs +++ b/iroh-dns-server/src/lib.rs @@ -25,7 +25,7 @@ mod tests { RelayUrl, SecretKey, discovery::pkarr::PkarrRelayClient, dns::DnsResolver, endpoint_info::EndpointInfo, }; - use n0_snafu::{Result, ResultExt}; + use n0_error::{Result, StdResultExt}; use pkarr::{SignedPacket, Timestamp}; use rand::{CryptoRng, SeedableRng}; use tracing_test::traced_test; @@ -55,98 +55,98 @@ mod tests { let mut packet = dns::Packet::new_reply(0); // record at root packet.answers.push(dns::ResourceRecord::new( - dns::Name::new("").e()?, + dns::Name::new("").anyerr()?, dns::CLASS::IN, 30, dns::rdata::RData::TXT("hi0".try_into().unwrap()), )); // record at level one packet.answers.push(dns::ResourceRecord::new( - dns::Name::new("_hello").e()?, + dns::Name::new("_hello").anyerr()?, dns::CLASS::IN, 30, dns::rdata::RData::TXT("hi1".try_into().unwrap()), )); // record at level two packet.answers.push(dns::ResourceRecord::new( - dns::Name::new("_hello.world").e()?, + dns::Name::new("_hello.world").anyerr()?, dns::CLASS::IN, 30, dns::rdata::RData::TXT("hi2".try_into().unwrap()), )); // multiple records for same name packet.answers.push(dns::ResourceRecord::new( - dns::Name::new("multiple").e()?, + dns::Name::new("multiple").anyerr()?, dns::CLASS::IN, 30, dns::rdata::RData::TXT("hi3".try_into().unwrap()), )); packet.answers.push(dns::ResourceRecord::new( - dns::Name::new("multiple").e()?, + dns::Name::new("multiple").anyerr()?, dns::CLASS::IN, 30, dns::rdata::RData::TXT("hi4".try_into().unwrap()), )); // record of type A packet.answers.push(dns::ResourceRecord::new( - dns::Name::new("").e()?, + dns::Name::new("").anyerr()?, dns::CLASS::IN, 30, dns::rdata::RData::A(Ipv4Addr::LOCALHOST.into()), )); // record of type AAAA packet.answers.push(dns::ResourceRecord::new( - dns::Name::new("foo.bar.baz").e()?, + dns::Name::new("foo.bar.baz").anyerr()?, dns::CLASS::IN, 30, dns::rdata::RData::AAAA(Ipv6Addr::LOCALHOST.into()), )); - SignedPacket::new(&keypair, &packet.answers, Timestamp::now()).e()? + SignedPacket::new(&keypair, &packet.answers, Timestamp::now()).anyerr()? }; let pkarr_client = pkarr::Client::builder() .no_default_network() .relays(&[pkarr_relay_url]) - .e()? + .anyerr()? 
.build() - .e()?; - pkarr_client.publish(&signed_packet, None).await.e()?; + .anyerr()?; + pkarr_client.publish(&signed_packet, None).await.anyerr()?; use hickory_server::proto::rr::Name; let pubkey = signed_packet.public_key().to_z32(); let resolver = test_resolver(nameserver); // resolve root record - let name = Name::from_utf8(format!("{pubkey}.")).e()?; + let name = Name::from_utf8(format!("{pubkey}.")).anyerr()?; let res = resolver.lookup_txt(name, DNS_TIMEOUT).await?; let records = res.into_iter().map(|t| t.to_string()).collect::>(); assert_eq!(records, vec!["hi0".to_string()]); // resolve level one record - let name = Name::from_utf8(format!("_hello.{pubkey}.")).e()?; + let name = Name::from_utf8(format!("_hello.{pubkey}.")).anyerr()?; let res = resolver.lookup_txt(name, DNS_TIMEOUT).await?; let records = res.into_iter().map(|t| t.to_string()).collect::>(); assert_eq!(records, vec!["hi1".to_string()]); // resolve level two record - let name = Name::from_utf8(format!("_hello.world.{pubkey}.")).e()?; + let name = Name::from_utf8(format!("_hello.world.{pubkey}.")).anyerr()?; let res = resolver.lookup_txt(name, DNS_TIMEOUT).await?; let records = res.into_iter().map(|t| t.to_string()).collect::>(); assert_eq!(records, vec!["hi2".to_string()]); // resolve multiple records for same name - let name = Name::from_utf8(format!("multiple.{pubkey}.")).e()?; + let name = Name::from_utf8(format!("multiple.{pubkey}.")).anyerr()?; let res = resolver.lookup_txt(name, DNS_TIMEOUT).await?; let records = res.into_iter().map(|t| t.to_string()).collect::>(); assert_eq!(records, vec!["hi3".to_string(), "hi4".to_string()]); // resolve A record - let name = Name::from_utf8(format!("{pubkey}.")).e()?; + let name = Name::from_utf8(format!("{pubkey}.")).anyerr()?; let res = resolver.lookup_ipv4(name, DNS_TIMEOUT).await?; let records = res.collect::>(); assert_eq!(records, vec![Ipv4Addr::LOCALHOST]); // resolve AAAA record - let name = Name::from_utf8(format!("foo.bar.baz.{pubkey}.")).e()?; + let name = Name::from_utf8(format!("foo.bar.baz.{pubkey}.")).anyerr()?; let res = resolver.lookup_ipv6(name, DNS_TIMEOUT).await?; let records = res.collect::>(); assert_eq!(records, vec![Ipv6Addr::LOCALHOST]); @@ -223,11 +223,12 @@ mod tests { #[tokio::test] #[traced_test] + #[ignore = "flaky"] async fn integration_mainline() -> Result { let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(0u64); // run a mainline testnet - let testnet = pkarr::mainline::Testnet::new_async(5).await.e()?; + let testnet = pkarr::mainline::Testnet::new_async(5).await.anyerr()?; let bootstrap = testnet.bootstrap.clone(); // spawn our server with mainline support @@ -249,8 +250,8 @@ mod tests { .no_default_network() .dht(|builder| builder.bootstrap(&testnet.bootstrap)) .build() - .e()?; - pkarr.publish(&signed_packet, None).await.e()?; + .anyerr()?; + pkarr.publish(&signed_packet, None).await.anyerr()?; // resolve via DNS from our server, which will lookup from our DHT let resolver = test_resolver(nameserver); diff --git a/iroh-dns-server/src/main.rs b/iroh-dns-server/src/main.rs index 83dfc514eec..7a7276ec202 100644 --- a/iroh-dns-server/src/main.rs +++ b/iroh-dns-server/src/main.rs @@ -2,7 +2,7 @@ use std::path::PathBuf; use clap::Parser; use iroh_dns_server::{config::Config, server::run_with_config_until_ctrl_c}; -use n0_snafu::Result; +use n0_error::Result; use tracing::debug; #[derive(Parser, Debug)] diff --git a/iroh-dns-server/src/server.rs b/iroh-dns-server/src/server.rs index dcd7918a95f..361e1899b8f 100644 --- 
a/iroh-dns-server/src/server.rs +++ b/iroh-dns-server/src/server.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use iroh_metrics::service::start_metrics_server; -use n0_snafu::{Result, ResultExt}; +use n0_error::{Result, StdResultExt}; use tracing::info; use crate::{ @@ -28,7 +28,7 @@ pub async fn run_with_config_until_ctrl_c(config: Config) -> Result<()> { store = store.with_mainline_fallback(bootstrap); }; let server = Server::spawn(config, store, metrics).await?; - tokio::signal::ctrl_c().await.e()?; + tokio::signal::ctrl_c().await.anyerr()?; info!("shutdown"); server.shutdown().await?; Ok(()) @@ -62,7 +62,9 @@ impl Server { if let Some(addr) = metrics_addr { let mut registry = iroh_metrics::Registry::default(); registry.register(metrics); - start_metrics_server(addr, Arc::new(registry)).await.e()?; + start_metrics_server(addr, Arc::new(registry)) + .await + .anyerr()?; } Ok(()) }); @@ -140,7 +142,7 @@ impl Server { let server = Self::spawn(config, store, Default::default()).await?; let dns_addr = server.dns_server.local_addr(); let http_addr = server.http_server.http_addr().expect("http is set"); - let http_url = format!("http://{http_addr}").parse::().e()?; + let http_url = format!("http://{http_addr}").parse::().anyerr()?; Ok((server, dns_addr, http_url)) } } diff --git a/iroh-dns-server/src/store.rs b/iroh-dns-server/src/store.rs index 0456183043b..537075598d9 100644 --- a/iroh-dns-server/src/store.rs +++ b/iroh-dns-server/src/store.rs @@ -2,9 +2,12 @@ use std::{collections::BTreeMap, num::NonZeroUsize, path::Path, sync::Arc, time::Duration}; -use hickory_server::proto::rr::{Name, RecordSet, RecordType, RrKey}; +use hickory_server::proto::{ + ProtoError, + rr::{Name, RecordSet, RecordType, RrKey}, +}; use lru::LruCache; -use n0_snafu::Result; +use n0_error::{Result, StdResultExt}; use pkarr::{Client as PkarrClient, SignedPacket}; use tokio::sync::Mutex; use tracing::{debug, trace, warn}; @@ -234,7 +237,7 @@ impl ZoneCache { record_type: RecordType, ) -> Result>> { let pubkey = PublicKeyBytes::from_signed_packet(signed_packet); - let zone = CachedZone::from_signed_packet(signed_packet)?; + let zone = CachedZone::from_signed_packet(signed_packet).anyerr()?; let res = zone.resolve(name, record_type); self.dht_cache.insert(pubkey, zone, DHT_CACHE_TTL); Ok(res) @@ -251,8 +254,10 @@ impl ZoneCache { trace!("insert skip: cached is newer"); Ok(()) } else { - self.cache - .put(pubkey, CachedZone::from_signed_packet(signed_packet)?); + self.cache.put( + pubkey, + CachedZone::from_signed_packet(signed_packet).anyerr()?, + ); trace!("inserted into cache"); Ok(()) } @@ -271,7 +276,7 @@ struct CachedZone { } impl CachedZone { - fn from_signed_packet(signed_packet: &SignedPacket) -> Result { + fn from_signed_packet(signed_packet: &SignedPacket) -> Result { let (_label, records) = signed_packet_to_hickory_records_without_origin(signed_packet, |_| true)?; Ok(Self { diff --git a/iroh-dns-server/src/store/signed_packets.rs b/iroh-dns-server/src/store/signed_packets.rs index 7ab466cd792..79866942ce0 100644 --- a/iroh-dns-server/src/store/signed_packets.rs +++ b/iroh-dns-server/src/store/signed_packets.rs @@ -6,7 +6,7 @@ use std::{ time::{Duration, SystemTime}, }; -use n0_snafu::{Result, ResultExt, format_err}; +use n0_error::{Result, StackResultExt, StdResultExt, anyerr}; use pkarr::{SignedPacket, Timestamp}; use redb::{ Database, MultimapTableDefinition, ReadableDatabase, ReadableTable, TableDefinition, @@ -111,7 +111,6 @@ impl Actor { } async fn run0(&mut self) -> Result<()> { - let expiry_us = 
self.options.eviction.as_micros() as u64; while let Some(msg) = self.recv.recv().await { // if we get a snapshot message here we don't need to do a write transaction let msg = if let Message::Snapshot { res } = msg { @@ -123,99 +122,127 @@ impl Actor { }; trace!("batch"); self.recv.push_back(msg).unwrap(); - let transaction = self.db.begin_write().e()?; - let mut tables = Tables::new(&transaction).e()?; + let transaction = self.db.begin_write().anyerr()?; + let mut tables = Tables::new(&transaction).anyerr()?; let timeout = tokio::time::sleep(self.options.max_batch_time); tokio::pin!(timeout); for _ in 0..self.options.max_batch_size { tokio::select! { _ = self.cancel.cancelled() => { drop(tables); - transaction.commit().e()?; + transaction.commit().anyerr()?; return Ok(()); } _ = &mut timeout => break, - Some(msg) = self.recv.recv() => { - match msg { - Message::Get { key, res } => { - match get_packet(&tables.signed_packets, &key) { - Ok(packet) => { - trace!("get {key}: {}", packet.is_some()); - res.send(packet).ok(); - }, - Err(err) => { - warn!("get {key} failed: {err:#}"); - return Err(err).with_context(|| format!("get packet for {key} failed")) - } - } - } - Message::Upsert { packet, res } => { - let key = PublicKeyBytes::from_signed_packet(&packet); - trace!("upsert {}", key); - let replaced = match get_packet(&tables.signed_packets, &key)? { Some(existing) => { - if existing.more_recent_than(&packet) { - res.send(false).ok(); - continue; - } else { - // remove the old packet from the update time index - tables.update_time.remove(&existing.timestamp().to_bytes(), key.as_bytes()).e()?; - true - } - } _ => { - false - }}; - let value = packet.serialize(); - tables.signed_packets - .insert(key.as_bytes(), &value[..]).e()?; - tables.update_time - .insert(&packet.timestamp().to_bytes(), key.as_bytes()).e()?; - if replaced { - self.metrics.store_packets_updated.inc(); - } else { - self.metrics.store_packets_inserted.inc(); - } - res.send(true).ok(); - } - Message::Remove { key, res } => { - trace!("remove {}", key); - let updated = match tables.signed_packets.remove(key.as_bytes()).e()? { Some(row) => { - let packet = SignedPacket::deserialize(row.value()).e()?; - tables.update_time.remove(&packet.timestamp().to_bytes(), key.as_bytes()).e()?; - self.metrics.store_packets_removed.inc(); - true - } _ => { - false - }}; - res.send(updated).ok(); - } - Message::Snapshot { res } => { - trace!("snapshot"); - res.send(Snapshot::new(&self.db)?).ok(); - } - Message::CheckExpired { key, time } => { - trace!("check expired {} at {}", key, fmt_time(time)); - match get_packet(&tables.signed_packets, &key)? 
{ Some(packet) => { - let expired = Timestamp::now() - expiry_us; - if packet.timestamp() < expired { - tables.update_time.remove(&time.to_bytes(), key.as_bytes()).e()?; - let _ = tables.signed_packets.remove(key.as_bytes()).e()?; - self.metrics.store_packets_expired.inc(); - debug!("removed expired packet {key}"); - } else { - debug!("packet {key} is no longer expired, removing obsolete expiry entry"); - tables.update_time.remove(&time.to_bytes(), key.as_bytes()).e()?; - } - } _ => { - debug!("expired packet {key} not found, remove from expiry table"); - tables.update_time.remove(&time.to_bytes(), key.as_bytes()).e()?; - }} - } + Some(msg) = self.recv.recv() => self.handle_message(msg, &mut tables)?, + } + } + drop(tables); + transaction.commit().anyerr()?; + } + Ok(()) + } + + fn handle_message(&self, msg: Message, tables: &mut Tables) -> Result<()> { + match msg { + Message::Get { key, res } => match get_packet(&tables.signed_packets, &key) { + Ok(packet) => { + trace!("get {key}: {}", packet.is_some()); + res.send(packet).ok(); + } + Err(err) => { + warn!("get {key} failed: {err:#}"); + return Err(err).context(format!("get packet for {key} failed")); + } + }, + Message::Upsert { packet, res } => { + let key = PublicKeyBytes::from_signed_packet(&packet); + trace!("upsert {}", key); + let replaced = match get_packet(&tables.signed_packets, &key)? { + Some(existing) => { + if existing.more_recent_than(&packet) { + res.send(false).ok(); + return Ok(()); + } else { + // remove the old packet from the update time index + tables + .update_time + .remove(&existing.timestamp().to_bytes(), key.as_bytes()) + .anyerr()?; + true + } + } + _ => false, + }; + let value = packet.serialize(); + tables + .signed_packets + .insert(key.as_bytes(), &value[..]) + .anyerr()?; + tables + .update_time + .insert(&packet.timestamp().to_bytes(), key.as_bytes()) + .anyerr()?; + if replaced { + self.metrics.store_packets_updated.inc(); + } else { + self.metrics.store_packets_inserted.inc(); + } + res.send(true).ok(); + } + Message::Remove { key, res } => { + trace!("remove {}", key); + let updated = match tables.signed_packets.remove(key.as_bytes()).anyerr()? { + Some(row) => { + let packet = SignedPacket::deserialize(row.value()).anyerr()?; + tables + .update_time + .remove(&packet.timestamp().to_bytes(), key.as_bytes()) + .anyerr()?; + self.metrics.store_packets_removed.inc(); + true + } + _ => false, + }; + res.send(updated).ok(); + } + Message::Snapshot { res } => { + trace!("snapshot"); + res.send(Snapshot::new(&self.db)?).ok(); + } + Message::CheckExpired { key, time } => { + trace!("check expired {} at {}", key, fmt_time(time)); + match get_packet(&tables.signed_packets, &key)? 
{ + Some(packet) => { + let expiry_us = self.options.eviction.as_micros() as u64; + let expired = Timestamp::now() - expiry_us; + if packet.timestamp() < expired { + tables + .update_time + .remove(&time.to_bytes(), key.as_bytes()) + .anyerr()?; + let _ = tables.signed_packets.remove(key.as_bytes()).anyerr()?; + self.metrics.store_packets_expired.inc(); + debug!("removed expired packet {key}"); + } else { + debug!( + "packet {key} is no longer expired, removing obsolete expiry entry" + ); + tables + .update_time + .remove(&time.to_bytes(), key.as_bytes()) + .anyerr()?; } } + None => { + debug!("expired packet {key} not found, remove from expiry table"); + tables + .update_time + .remove(&time.to_bytes(), key.as_bytes()) + .anyerr()?; + } } } - drop(tables); - transaction.commit().e()?; } Ok(()) } @@ -249,10 +276,10 @@ pub(super) struct Snapshot { impl Snapshot { pub fn new(db: &Database) -> Result { - let tx = db.begin_read().e()?; + let tx = db.begin_read().anyerr()?; Ok(Self { - signed_packets: tx.open_table(SIGNED_PACKETS_TABLE).e()?, - update_time: tx.open_multimap_table(UPDATE_TIME_TABLE).e()?, + signed_packets: tx.open_table(SIGNED_PACKETS_TABLE).anyerr()?, + update_time: tx.open_multimap_table(UPDATE_TIME_TABLE).anyerr()?, }) } } @@ -266,7 +293,7 @@ impl SignedPacketStore { let path = path.as_ref(); info!("loading packet database from {}", path.to_string_lossy()); if let Some(parent) = path.parent() { - std::fs::create_dir_all(parent).with_context(|| { + std::fs::create_dir_all(parent).with_std_context(|_| { format!( "failed to create database directory at {}", path.to_string_lossy() @@ -275,7 +302,7 @@ impl SignedPacketStore { } let db = Database::builder() .create(path) - .context("failed to open packet database")?; + .std_context("failed to open packet database")?; Self::open(db, options, metrics) } @@ -283,15 +310,15 @@ impl SignedPacketStore { info!("using in-memory packet database"); let db = Database::builder() .create_with_backend(InMemoryBackend::new()) - .e()?; + .anyerr()?; Self::open(db, options, metrics) } pub fn open(db: Database, options: Options, metrics: Arc) -> Result { // create tables - let write_tx = db.begin_write().e()?; - let _ = Tables::new(&write_tx).e()?; - write_tx.commit().e()?; + let write_tx = db.begin_write().anyerr()?; + let _ = Tables::new(&write_tx).anyerr()?; + write_tx.commit().anyerr()?; let (send, recv) = mpsc::channel(1024); let send2 = send.clone(); let cancel = CancellationToken::new(); @@ -323,8 +350,8 @@ impl SignedPacketStore { self.send .send(Message::Upsert { packet, res: tx }) .await - .e()?; - rx.await.e() + .anyerr()?; + rx.await.anyerr() } pub async fn get(&self, key: &PublicKeyBytes) -> Result> { @@ -332,8 +359,8 @@ impl SignedPacketStore { self.send .send(Message::Get { key: *key, res: tx }) .await - .e()?; - rx.await.e() + .anyerr()?; + rx.await.anyerr() } pub async fn remove(&self, key: &PublicKeyBytes) -> Result { @@ -341,8 +368,8 @@ impl SignedPacketStore { self.send .send(Message::Remove { key: *key, res: tx }) .await - .e()?; - rx.await.e() + .anyerr()?; + rx.await.anyerr() } } @@ -350,7 +377,10 @@ fn get_packet( table: &impl ReadableTable<&'static SignedPacketsKey, &'static [u8]>, key: &PublicKeyBytes, ) -> Result> { - let Some(row) = table.get(key.as_ref()).context("database fetch failed")? else { + let Some(row) = table + .get(key.as_ref()) + .std_context("database fetch failed")? 
+ else { return Ok(None); }; match SignedPacket::deserialize(row.value()) { @@ -366,7 +396,7 @@ fn get_packet( buf.extend(data); match SignedPacket::deserialize(&buf) { Ok(packet) => Ok(Some(packet)), - Err(err2) => Err(format_err!( + Err(err2) => Err(anyerr!( "Failed to decode as pkarr v3: {err:#}. Also failed to decode as pkarr v2: {err2:#}" )), } @@ -395,13 +425,13 @@ async fn evict_task_inner(send: mpsc::Sender, options: Options) -> Resu let (tx, rx) = oneshot::channel(); let _ = send.send(Message::Snapshot { res: tx }).await.ok(); // if we can't get the snapshot we exit the loop, main actor dead - let snapshot = rx.await.context("failed to get snapshot")?; + let snapshot = rx.await.std_context("failed to get snapshot")?; let expired = Timestamp::now() - expiry_us; trace!("evicting packets older than {}", fmt_time(expired)); // if getting the range fails we exit the loop and shut down // if individual reads fail we log the error and limp on - for item in snapshot.update_time.range(..expired.to_bytes()).e()? { + for item in snapshot.update_time.range(..expired.to_bytes()).anyerr()? { let (time, keys) = match item { Ok(v) => v, Err(e) => { @@ -426,7 +456,9 @@ async fn evict_task_inner(send: mpsc::Sender, options: Options) -> Resu let key = PublicKeyBytes::new(key.value()); debug!("evicting expired packet {} {}", fmt_time(time), key); - send.send(Message::CheckExpired { time, key }).await.e()?; + send.send(Message::CheckExpired { time, key }) + .await + .anyerr()?; } } // sleep for the eviction interval so we don't constantly check @@ -454,11 +486,11 @@ impl IoThread { F: FnOnce() -> Fut + Send + 'static, Fut: Future, { - let rt = tokio::runtime::Handle::try_current().context("get tokio handle")?; + let rt = tokio::runtime::Handle::try_current().std_context("get tokio handle")?; let handle = std::thread::Builder::new() .name(name.into()) .spawn(move || rt.block_on(f())) - .context("failed to spawn thread")?; + .std_context("failed to spawn thread")?; Ok(Self { handle: Some(handle), }) diff --git a/iroh-dns-server/src/util.rs b/iroh-dns-server/src/util.rs index 046f8434582..6419b6adc5a 100644 --- a/iroh-dns-server/src/util.rs +++ b/iroh-dns-server/src/util.rs @@ -6,6 +6,7 @@ use std::{ }; use hickory_server::proto::{ + ProtoError, op::Message, rr::{ Name, Record, RecordSet, RecordType, RrKey, @@ -13,7 +14,7 @@ use hickory_server::proto::{ }, serialize::binary::BinDecodable, }; -use n0_snafu::{Error, Result, ResultExt}; +use n0_error::{AnyError, StdResultExt, e, stack_error}; use pkarr::SignedPacket; #[derive( @@ -21,14 +22,26 @@ use pkarr::SignedPacket; )] pub struct PublicKeyBytes([u8; 32]); +#[stack_error(derive, add_meta, from_sources)] +pub enum InvalidPublicKeyBytes { + #[error(transparent)] + Encoding { + #[error(std_err)] + source: z32::Z32Error, + }, + #[error("invalid length, must be 32 bytes")] + InvalidLength, +} + impl PublicKeyBytes { pub fn new(bytes: [u8; 32]) -> Self { Self(bytes) } - pub fn from_z32(s: &str) -> Result { - let bytes = z32::decode(s.as_bytes()).e()?; - let bytes = TryInto::<[u8; 32]>::try_into(&bytes[..]).context("invalid length")?; + pub fn from_z32(s: &str) -> Result { + let bytes = z32::decode(s.as_bytes())?; + let bytes = TryInto::<[u8; 32]>::try_into(&bytes[..]) + .map_err(|_| e!(InvalidPublicKeyBytes::InvalidLength))?; Ok(Self(bytes)) } @@ -68,14 +81,15 @@ impl From for PublicKeyBytes { } impl TryFrom for pkarr::PublicKey { - type Error = Error; + type Error = AnyError; fn try_from(value: PublicKeyBytes) -> Result { - 
pkarr::PublicKey::try_from(&value.0).e()
+        pkarr::PublicKey::try_from(&value.0).anyerr()
     }
 }
 
 impl FromStr for PublicKeyBytes {
-    type Err = Error;
+    type Err = InvalidPublicKeyBytes;
+
     fn from_str(s: &str) -> Result<Self, Self::Err> {
         Self::from_z32(s)
     }
 }
@@ -87,17 +101,19 @@ impl AsRef<[u8; 32]> for PublicKeyBytes {
     }
 }
 
-pub fn signed_packet_to_hickory_message(signed_packet: &SignedPacket) -> Result<Message> {
+pub fn signed_packet_to_hickory_message(
+    signed_packet: &SignedPacket,
+) -> Result<Message, ProtoError> {
     let encoded = signed_packet.encoded_packet();
-    let message = Message::from_bytes(&encoded).e()?;
+    let message = Message::from_bytes(&encoded)?;
     Ok(message)
 }
 
 pub fn signed_packet_to_hickory_records_without_origin(
     signed_packet: &SignedPacket,
     filter: impl Fn(&Record) -> bool,
-) -> Result<(Label, BTreeMap<RrKey, Arc<RecordSet>>)> {
-    let common_zone = Label::from_utf8(&signed_packet.public_key().to_z32()).e()?;
+) -> Result<(Label, BTreeMap<RrKey, Arc<RecordSet>>), ProtoError> {
+    let common_zone = Label::from_utf8(&signed_packet.public_key().to_z32())?;
     let mut message = signed_packet_to_hickory_message(signed_packet)?;
     let answers = message.take_answers();
     let mut output: BTreeMap<RrKey, Arc<RecordSet>> = BTreeMap::new();
@@ -111,7 +127,7 @@ pub fn signed_packet_to_hickory_records_without_origin(
         if name.num_labels() < 1 {
             continue;
         }
-        let zone = name.iter().next_back().unwrap().into_label().e()?;
+        let zone = name.iter().next_back().unwrap().into_label()?;
         if zone != common_zone {
             continue;
         }
@@ -120,7 +136,7 @@ pub fn signed_packet_to_hickory_records_without_origin(
         }
 
         let name_without_zone =
-            Name::from_labels(name.iter().take(name.num_labels() as usize - 1)).e()?;
+            Name::from_labels(name.iter().take(name.num_labels() as usize - 1))?;
         record.set_name(name_without_zone);
 
         let rrkey = RrKey::new(record.name().into(), record.record_type());
@@ -144,8 +160,8 @@ pub fn record_set_append_origin(
     input: &RecordSet,
     origin: &Name,
     serial: u32,
-) -> Result<RecordSet> {
-    let new_name = input.name().clone().append_name(origin).e()?;
+) -> Result<RecordSet, ProtoError> {
+    let new_name = input.name().clone().append_name(origin)?;
     let mut output = RecordSet::new(new_name.clone(), input.record_type(), serial);
     // TODO: less clones
     for record in input.records_without_rrsigs() {
diff --git a/iroh-relay/Cargo.toml b/iroh-relay/Cargo.toml
index 996d4dee116..afa43e5242f 100644
--- a/iroh-relay/Cargo.toml
+++ b/iroh-relay/Cargo.toml
@@ -32,7 +32,7 @@ http-body-util = "0.1.0"
 hyper = { version = "1", features = ["server", "client", "http1"] }
 hyper-util = "0.1.1"
 iroh-base = { version = "0.94.1", path = "../iroh-base", default-features = false, features = ["key", "relay"] }
-iroh-metrics = { version = "0.36", default-features = false }
+iroh-metrics = { version = "0.37", default-features = false }
 n0-future = "0.3.0"
 num_enum = "0.7"
 pin-project = "1"
@@ -70,9 +70,7 @@ webpki_types = { package = "rustls-pki-types", version = "1.12" }
 data-encoding = "2.6.0"
 lru = "0.16"
 z32 = "1.0.3"
-snafu = { version = "0.8.5", features = ["rust_1_81"] }
-n0-snafu = "0.2.2"
-nested_enum_utils = "0.2.0"
+n0-error = "0.1.0"
 
 # server feature
 clap = { version = "4", features = ["derive"], optional = true }
diff --git a/iroh-relay/src/client.rs b/iroh-relay/src/client.rs
index e398b2fa401..3bba68cdb41 100644
--- a/iroh-relay/src/client.rs
+++ b/iroh-relay/src/client.rs
@@ -11,13 +11,12 @@ use std::{
 
 use conn::Conn;
 use iroh_base::{RelayUrl, SecretKey};
+use n0_error::{e, stack_error};
 use n0_future::{
     Sink, Stream,
     split::{SplitSink, SplitStream, split},
     time,
 };
-use nested_enum_utils::common_fields;
-use snafu::{Backtrace, Snafu};
 #[cfg(any(test,
feature = "test-utils"))] use tracing::warn; use tracing::{Level, debug, event, trace}; @@ -47,75 +46,85 @@ mod util; /// /// `ConnectError` contains `DialError`, errors that can occur while dialing the /// relay, as well as errors that occur while creating or maintaining a connection. -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] +#[stack_error(derive, add_meta, from_sources)] #[allow(missing_docs)] -#[derive(Debug, Snafu)] #[non_exhaustive] pub enum ConnectError { - #[snafu(display("Invalid URL for websocket: {url}"))] + #[error("Invalid URL for websocket: {url}")] InvalidWebsocketUrl { url: Url }, - #[snafu(display("Invalid relay URL: {url}"))] + #[error("Invalid relay URL: {url}")] InvalidRelayUrl { url: Url }, - #[snafu(transparent)] + #[error(transparent)] Websocket { #[cfg(not(wasm_browser))] + #[error(std_err)] source: tokio_websockets::Error, #[cfg(wasm_browser)] + #[error(std_err)] source: ws_stream_wasm::WsErr, }, - #[snafu(transparent)] - Handshake { source: handshake::Error }, - #[snafu(transparent)] + #[error(transparent)] + Handshake { + #[error(std_err)] + source: handshake::Error, + }, + #[error(transparent)] Dial { source: DialError }, - #[snafu(display("Unexpected status during upgrade: {code}"))] + #[error("Unexpected status during upgrade: {code}")] UnexpectedUpgradeStatus { code: hyper::StatusCode }, - #[snafu(display("Failed to upgrade response"))] - Upgrade { source: hyper::Error }, - #[snafu(display("Invalid TLS servername"))] + #[error("Failed to upgrade response")] + Upgrade { + #[error(std_err)] + source: hyper::Error, + }, + #[error("Invalid TLS servername")] InvalidTlsServername {}, - #[snafu(display("No local address available"))] + #[error("No local address available")] NoLocalAddr {}, - #[snafu(display("tls connection failed"))] - Tls { source: std::io::Error }, + #[error("tls connection failed")] + Tls { + #[error(std_err)] + source: std::io::Error, + }, #[cfg(wasm_browser)] - #[snafu(display("The relay protocol is not available in browsers"))] + #[error("The relay protocol is not available in browsers")] RelayProtoNotAvailable {}, } /// Errors that can occur while dialing the relay server. 
-#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] +#[stack_error(derive, add_meta, from_sources)] #[allow(missing_docs)] -#[derive(Debug, Snafu)] #[non_exhaustive] pub enum DialError { - #[snafu(display("Invliad target port"))] + #[error("Invalid target port")] InvalidTargetPort {}, - #[snafu(transparent)] + #[error(transparent)] #[cfg(not(wasm_browser))] Dns { source: DnsError }, - #[snafu(transparent)] - Timeout { source: time::Elapsed }, - #[snafu(transparent)] - Io { source: std::io::Error }, - #[snafu(display("Invalid URL: {url}"))] + #[error(transparent)] + Timeout { + #[error(std_err)] + source: time::Elapsed, + }, + #[error(transparent)] + Io { + #[error(std_err)] + source: std::io::Error, + }, + #[error("Invalid URL: {url}")] InvalidUrl { url: Url }, - #[snafu(display("Failed proxy connection: {status}"))] + #[error("Failed proxy connection: {status}")] ProxyConnectInvalidStatus { status: hyper::StatusCode }, - #[snafu(display("Invalid Proxy URL {proxy_url}"))] + #[error("Invalid Proxy URL {proxy_url}")] ProxyInvalidUrl { proxy_url: Url }, - #[snafu(display("failed to establish proxy connection"))] - ProxyConnect { source: hyper::Error }, - #[snafu(display("Invalid proxy TLS servername: {proxy_hostname}"))] + #[error("failed to establish proxy connection")] + ProxyConnect { + #[error(std_err)] + source: hyper::Error, + }, + #[error("Invalid proxy TLS servername: {proxy_hostname}")] ProxyInvalidTlsServername { proxy_hostname: String }, - #[snafu(display("Invalid proxy target port"))] + #[error("Invalid proxy target port")] ProxyInvalidTargetPort {}, } @@ -220,10 +229,9 @@ impl ClientBuilder { _ => "wss", }) .map_err(|_| { - InvalidWebsocketUrlSnafu { - url: dial_url.clone(), - } - .build() + e!(ConnectError::InvalidWebsocketUrl { + url: dial_url.clone() + }) })?; debug!(%dial_url, "Dialing relay by websocket"); @@ -242,14 +250,13 @@ impl ClientBuilder { let local_addr = stream .as_ref() .local_addr() - .map_err(|_| NoLocalAddrSnafu.build())?; + .map_err(|_| e!(ConnectError::NoLocalAddr))?; let mut builder = tokio_websockets::ClientBuilder::new() .uri(dial_url.as_str()) .map_err(|_| { - InvalidRelayUrlSnafu { - url: dial_url.clone(), - } - .build() + e!(ConnectError::InvalidRelayUrl { + url: dial_url.clone() + }) })? 
.add_header( SEC_WEBSOCKET_PROTOCOL, @@ -271,12 +278,12 @@ impl ClientBuilder { } let (conn, response) = builder.connect_on(stream).await?; - if response.status() != hyper::StatusCode::SWITCHING_PROTOCOLS { - UnexpectedUpgradeStatusSnafu { - code: response.status(), + n0_error::ensure!( + response.status() == hyper::StatusCode::SWITCHING_PROTOCOLS, + ConnectError::UnexpectedUpgradeStatus { + code: response.status() } - .fail()?; - } + ); let conn = Conn::new(conn, self.key_cache.clone(), &self.secret_key).await?; @@ -323,10 +330,9 @@ impl ClientBuilder { _ => "wss", }) .map_err(|_| { - InvalidWebsocketUrlSnafu { - url: dial_url.clone(), - } - .build() + e!(ConnectError::InvalidWebsocketUrl { + url: dial_url.clone() + }) })?; debug!(%dial_url, "Dialing relay by websocket"); diff --git a/iroh-relay/src/client/conn.rs b/iroh-relay/src/client/conn.rs index 7a76bcff21a..a361f8ac967 100644 --- a/iroh-relay/src/client/conn.rs +++ b/iroh-relay/src/client/conn.rs @@ -8,9 +8,8 @@ use std::{ }; use iroh_base::SecretKey; +use n0_error::{ensure, stack_error}; use n0_future::{Sink, Stream}; -use nested_enum_utils::common_fields; -use snafu::{Backtrace, Snafu}; use tracing::debug; use super::KeyCache; @@ -26,41 +25,31 @@ use crate::{ }; /// Error for sending messages to the relay server. -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] +#[stack_error(derive, add_meta, from_sources, std_sources)] #[allow(missing_docs)] -#[derive(Debug, Snafu)] #[non_exhaustive] pub enum SendError { - #[snafu(transparent)] + #[error(transparent)] StreamError { #[cfg(not(wasm_browser))] source: tokio_websockets::Error, #[cfg(wasm_browser)] source: ws_stream_wasm::WsErr, }, - #[snafu(display("Exceeds max packet size ({MAX_PACKET_SIZE}): {size}"))] + #[error("Exceeds max packet size ({MAX_PACKET_SIZE}): {size}")] ExceedsMaxPacketSize { size: usize }, - #[snafu(display("Attempted to send empty packet"))] + #[error("Attempted to send empty packet")] EmptyPacket {}, } /// Errors when receiving messages from the relay server. -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] +#[stack_error(derive, add_meta, from_sources, std_sources)] #[allow(missing_docs)] -#[derive(Debug, Snafu)] #[non_exhaustive] pub enum RecvError { - #[snafu(transparent)] + #[error(transparent)] Protocol { source: ProtoError }, - #[snafu(transparent)] + #[error(transparent)] StreamError { #[cfg(not(wasm_browser))] source: tokio_websockets::Error, @@ -146,9 +135,12 @@ impl Sink for Conn { fn start_send(mut self: Pin<&mut Self>, frame: ClientToRelayMsg) -> Result<(), Self::Error> { let size = frame.encoded_len(); - snafu::ensure!(size <= MAX_PACKET_SIZE, ExceedsMaxPacketSizeSnafu { size }); + ensure!( + size <= MAX_PACKET_SIZE, + SendError::ExceedsMaxPacketSize { size } + ); if let ClientToRelayMsg::Datagrams { datagrams, .. 
} = &frame { - snafu::ensure!(!datagrams.contents.is_empty(), EmptyPacketSnafu); + ensure!(!datagrams.contents.is_empty(), SendError::EmptyPacket); } Pin::new(&mut self.conn) diff --git a/iroh-relay/src/client/tls.rs b/iroh-relay/src/client/tls.rs index ba62e9cc28a..b5c585c4685 100644 --- a/iroh-relay/src/client/tls.rs +++ b/iroh-relay/src/client/tls.rs @@ -10,9 +10,9 @@ use bytes::Bytes; use data_encoding::BASE64URL; use http_body_util::Empty; use hyper::{Request, upgrade::Parts}; +use n0_error::e; use n0_future::{task, time}; use rustls::client::Resumption; -use snafu::{OptionExt, ResultExt}; use tracing::error; use super::{ @@ -84,7 +84,7 @@ impl MaybeTlsStreamBuilder { let local_addr = tcp_stream .local_addr() - .map_err(|_| NoLocalAddrSnafu.build())?; + .map_err(|_| e!(ConnectError::NoLocalAddr))?; debug!(server_addr = ?tcp_stream.peer_addr(), %local_addr, "TCP stream connected"); @@ -92,13 +92,13 @@ impl MaybeTlsStreamBuilder { debug!("Starting TLS handshake"); let hostname = self .tls_servername() - .ok_or_else(|| InvalidTlsServernameSnafu.build())?; + .ok_or_else(|| e!(ConnectError::InvalidTlsServername))?; let hostname = hostname.to_owned(); let tls_stream = tls_connector .connect(hostname, tcp_stream) .await - .context(TlsSnafu)?; + .map_err(|err| e!(ConnectError::Tls, err))?; debug!("tls_connector connect success"); Ok(MaybeTlsStream::Tls(tls_stream)) } else { @@ -144,14 +144,15 @@ impl MaybeTlsStreamBuilder { .resolve_host(&self.url, self.prefer_ipv6, DNS_TIMEOUT) .await?; - let port = url_port(&self.url).context(InvalidTargetPortSnafu)?; + let port = url_port(&self.url).ok_or_else(|| e!(DialError::InvalidTargetPort))?; let addr = SocketAddr::new(dst_ip, port); debug!("connecting to {}", addr); let tcp_stream = time::timeout(DIAL_ENDPOINT_TIMEOUT, async move { TcpStream::connect(addr).await }) - .await??; + .await + .map_err(|err| e!(DialError::Timeout, err))??; tcp_stream.set_nodelay(true)?; @@ -174,7 +175,8 @@ impl MaybeTlsStreamBuilder { .resolve_host(&proxy_url, self.prefer_ipv6, DNS_TIMEOUT) .await?; - let proxy_port = url_port(&proxy_url).context(ProxyInvalidTargetPortSnafu)?; + let proxy_port = + url_port(&proxy_url).ok_or_else(|| e!(DialError::ProxyInvalidTargetPort))?; let proxy_addr = SocketAddr::new(proxy_ip, proxy_port); debug!(%proxy_addr, "connecting to proxy"); @@ -190,26 +192,29 @@ impl MaybeTlsStreamBuilder { let io = if proxy_url.scheme() == "http" { MaybeTlsStream::Raw(tcp_stream) } else { - let hostname = proxy_url.host_str().context(ProxyInvalidUrlSnafu { - proxy_url: proxy_url.clone(), + let hostname = proxy_url.host_str().ok_or_else(|| { + e!(DialError::ProxyInvalidUrl { + proxy_url: proxy_url.clone() + }) })?; let hostname = rustls::pki_types::ServerName::try_from(hostname.to_string()).map_err(|_| { - ProxyInvalidTlsServernameSnafu { - proxy_hostname: hostname.to_string(), - } - .build() + e!(DialError::ProxyInvalidTlsServername { + proxy_hostname: hostname.to_string() + }) })?; let tls_stream = tls_connector.connect(hostname, tcp_stream).await?; MaybeTlsStream::Tls(tls_stream) }; let io = TokioIo::new(io); - let target_host = self.url.host_str().context(InvalidUrlSnafu { - url: self.url.clone(), + let target_host = self.url.host_str().ok_or_else(|| { + e!(DialError::InvalidUrl { + url: self.url.clone() + }) })?; - let port = url_port(&self.url).context(InvalidTargetPortSnafu)?; + let port = url_port(&self.url).ok_or_else(|| e!(DialError::InvalidTargetPort))?; // Establish Proxy Tunnel let mut req_builder = Request::builder() @@ -240,22 +245,26 @@ impl 
MaybeTlsStreamBuilder { let (mut sender, conn) = hyper::client::conn::http1::handshake(io) .await - .context(ProxyConnectSnafu)?; + .map_err(|err| e!(DialError::ProxyConnect, err))?; task::spawn(async move { if let Err(err) = conn.with_upgrades().await { error!("Proxy connection failed: {:?}", err); } }); - let res = sender.send_request(req).await.context(ProxyConnectSnafu)?; + let res = sender + .send_request(req) + .await + .map_err(|err| e!(DialError::ProxyConnect, err))?; if !res.status().is_success() { - return Err(ProxyConnectInvalidStatusSnafu { - status: res.status(), - } - .build()); + return Err(e!(DialError::ProxyConnectInvalidStatus { + status: res.status() + })); } - let upgraded = hyper::upgrade::on(res).await.context(ProxyConnectSnafu)?; + let upgraded = hyper::upgrade::on(res) + .await + .map_err(|err| e!(DialError::ProxyConnect, err))?; let Parts { io, read_buf, .. } = upgraded .downcast::>>() .expect("only this upgrade used"); diff --git a/iroh-relay/src/dns.rs b/iroh-relay/src/dns.rs index d9bdfd13505..b9d1a1bc3fd 100644 --- a/iroh-relay/src/dns.rs +++ b/iroh-relay/src/dns.rs @@ -13,13 +13,12 @@ use hickory_resolver::{ name_server::TokioConnectionProvider, }; use iroh_base::EndpointId; +use n0_error::{StackError, e, stack_error}; use n0_future::{ StreamExt, boxed::BoxFuture, time::{self, Duration}, }; -use nested_enum_utils::common_fields; -use snafu::{Backtrace, GenerateImplicitData, OptionExt, ResultExt, Snafu}; use tokio::sync::RwLock; use tracing::debug; use url::Url; @@ -62,77 +61,51 @@ pub trait Resolver: fmt::Debug + Send + Sync + 'static { pub type BoxIter = Box + Send + 'static>; /// Potential errors related to dns. -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources, std_sources)] #[non_exhaustive] -#[snafu(visibility(pub(crate)))] pub enum DnsError { - #[snafu(transparent)] + #[error(transparent)] Timeout { source: tokio::time::error::Elapsed }, - #[snafu(display("No response"))] + #[error("No response")] NoResponse {}, - #[snafu(display("Resolve failed ipv4: {ipv4}, ipv6 {ipv6}"))] + #[error("Resolve failed ipv4: {ipv4}, ipv6 {ipv6}")] ResolveBoth { ipv4: Box, ipv6: Box, }, - #[snafu(display("missing host"))] + #[error("missing host")] MissingHost {}, - #[snafu(transparent)] + #[error(transparent)] Resolve { source: hickory_resolver::ResolveError, }, - #[snafu(display("invalid DNS response: not a query for _iroh.z32encodedpubkey"))] + #[error("invalid DNS response: not a query for _iroh.z32encodedpubkey")] InvalidResponse {}, } #[cfg(not(wasm_browser))] -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources)] #[non_exhaustive] -#[snafu(visibility(pub(crate)))] pub enum LookupError { - #[snafu(display("Malformed txt from lookup"))] - ParseError { - #[snafu(source(from(ParseError, Box::new)))] - source: Box, - }, - #[snafu(display("Failed to resolve TXT record"))] - LookupFailed { - #[snafu(source(from(DnsError, Box::new)))] - source: Box, - }, + #[error("Malformed txt from lookup")] + ParseError { source: ParseError }, + #[error("Failed to resolve TXT record")] + LookupFailed { source: DnsError }, } -/// Error returned when an input value is too long for [`crate::endpoint_info::UserData`]. 
-#[allow(missing_docs)] -#[derive(Debug, Snafu)] -#[snafu(module)] -#[snafu(display("no calls succeeded: [{}]", errors.iter().map(|e| e.to_string()).collect::>().join("")))] -pub struct StaggeredError { - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, +/// Error returned when a staggered call fails. +#[stack_error(derive, add_meta)] +#[error("no calls succeeded: [{}]", errors.iter().map(|e| e.to_string()).collect::>().join(""))] +pub struct StaggeredError { errors: Vec, } -impl StaggeredError { - pub(crate) fn new(errors: Vec) -> Self { - Self { - errors, - backtrace: GenerateImplicitData::generate(), - span_trace: n0_snafu::SpanTrace::generate(), - } +impl StaggeredError { + /// Returns an iterator over all encountered errors. + pub fn iter(&self) -> impl Iterator { + self.errors.iter() } } @@ -313,11 +286,10 @@ impl DnsResolver { (Ok(ipv4), Ok(ipv6)) => Ok(LookupIter::Both(ipv4.chain(ipv6))), (Ok(ipv4), Err(_)) => Ok(LookupIter::Ipv4(ipv4)), (Err(_), Ok(ipv6)) => Ok(LookupIter::Ipv6(ipv6)), - (Err(ipv4_err), Err(ipv6_err)) => Err(ResolveBothSnafu { + (Err(ipv4_err), Err(ipv6_err)) => Err(e!(DnsError::ResolveBoth { ipv4: Box::new(ipv4_err), - ipv6: Box::new(ipv6_err), - } - .build()), + ipv6: Box::new(ipv6_err) + })), } } @@ -328,7 +300,7 @@ impl DnsResolver { prefer_ipv6: bool, timeout: Duration, ) -> Result { - let host = url.host().context(MissingHostSnafu)?; + let host = url.host().ok_or_else(|| e!(DnsError::MissingHost))?; match host { url::Host::Domain(domain) => { // Need to do a DNS lookup @@ -338,20 +310,19 @@ impl DnsResolver { ); let (v4, v6) = match lookup { (Err(ipv4_err), Err(ipv6_err)) => { - return Err(ResolveBothSnafu { + return Err(e!(DnsError::ResolveBoth { ipv4: Box::new(ipv4_err), - ipv6: Box::new(ipv6_err), - } - .build()); + ipv6: Box::new(ipv6_err) + })); } (Err(_), Ok(mut v6)) => (None, v6.next()), (Ok(mut v4), Err(_)) => (v4.next(), None), (Ok(mut v4), Ok(mut v6)) => (v4.next(), v6.next()), }; if prefer_ipv6 { - v6.or(v4).context(NoResponseSnafu) + v6.or(v4).ok_or_else(|| e!(DnsError::NoResponse)) } else { - v4.or(v6).context(NoResponseSnafu) + v4.or(v6).ok_or_else(|| e!(DnsError::NoResponse)) } } url::Host::Ipv4(ip) => Ok(IpAddr::V4(ip)), @@ -422,11 +393,8 @@ impl DnsResolver { ) -> Result { let name = endpoint_info::endpoint_domain(endpoint_id, origin); let name = endpoint_info::ensure_iroh_txt_label(name); - let lookup = self - .lookup_txt(name.clone(), DNS_TIMEOUT) - .await - .context(LookupFailedSnafu)?; - let info = EndpointInfo::from_txt_lookup(name, lookup).context(ParseSnafu)?; + let lookup = self.lookup_txt(name.clone(), DNS_TIMEOUT).await?; + let info = EndpointInfo::from_txt_lookup(name, lookup)?; Ok(info) } @@ -436,11 +404,8 @@ impl DnsResolver { name: &str, ) -> Result { let name = endpoint_info::ensure_iroh_txt_label(name.to_string()); - let lookup = self - .lookup_txt(name.clone(), DNS_TIMEOUT) - .await - .context(LookupFailedSnafu)?; - let info = EndpointInfo::from_txt_lookup(name, lookup).context(ParseSnafu)?; + let lookup = self.lookup_txt(name.clone(), DNS_TIMEOUT).await?; + let info = EndpointInfo::from_txt_lookup(name, lookup)?; Ok(info) } @@ -765,7 +730,7 @@ impl, B: Iterator> Iterator for Lookup /// ignoring any previous error. If all calls fail, an error summarizing all errors is returned. 
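`StaggeredError` above also shows the struct form of the macro: the fields feed a single `#[error(...)]` format attribute, and `e!(StaggeredError { errors })` replaces the hand-written constructor that previously had to populate the backtrace and span trace by hand. A rough equivalent with a hypothetical `BatchError`, under the same assumptions about the `n0-error` API:

```rust
use n0_error::{e, stack_error};

/// Hypothetical error collecting every failure from a batch of attempts.
#[stack_error(derive, add_meta)]
#[error("no attempt succeeded: [{}]", errors.iter().map(|e| e.to_string()).collect::<Vec<_>>().join(", "))]
pub struct BatchError {
    errors: Vec<std::io::Error>,
}

impl BatchError {
    /// Accessor in the style of `StaggeredError::iter`.
    pub fn iter(&self) -> impl Iterator<Item = &std::io::Error> {
        self.errors.iter()
    }
}

fn run_all(results: Vec<Result<(), std::io::Error>>) -> Result<(), BatchError> {
    let errors: Vec<_> = results.into_iter().filter_map(Result::err).collect();
    if errors.is_empty() {
        Ok(())
    } else {
        // Struct errors are built with `e!` and their fields, like `e!(StaggeredError { errors })`.
        Err(e!(BatchError { errors }))
    }
}
```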
async fn stagger_call< T, - E: std::fmt::Debug + std::fmt::Display, + E: StackError + 'static, F: Fn() -> Fut, Fut: Future>, >( @@ -793,7 +758,7 @@ async fn stagger_call< } } - Err(StaggeredError::new(errors)) + Err(e!(StaggeredError { errors })) } fn add_jitter(delay: &u64) -> Duration { @@ -826,7 +791,7 @@ pub(crate) mod tests { let r_pos = DONE_CALL.fetch_add(1, std::sync::atomic::Ordering::Relaxed); async move { tracing::info!(r_pos, "call"); - CALL_RESULTS[r_pos].map_err(|_| InvalidResponseSnafu.build()) + CALL_RESULTS[r_pos].map_err(|_| e!(DnsError::InvalidResponse)) } }; @@ -872,7 +837,7 @@ pub(crate) mod tests { let addr = if host == "foo.example" { Ipv4Addr::new(1, 1, 1, 1) } else { - return Err(NoResponseSnafu.build()); + return Err(e!(DnsError::NoResponse)); }; let iter: BoxIter = Box::new(vec![addr].into_iter()); Ok(iter) diff --git a/iroh-relay/src/endpoint_info.rs b/iroh-relay/src/endpoint_info.rs index 90ab090511d..5cd582b5b73 100644 --- a/iroh-relay/src/endpoint_info.rs +++ b/iroh-relay/src/endpoint_info.rs @@ -41,50 +41,41 @@ use std::{ }; use iroh_base::{EndpointAddr, EndpointId, KeyParsingError, RelayUrl, SecretKey, TransportAddr}; -use nested_enum_utils::common_fields; -use snafu::{Backtrace, ResultExt, Snafu}; +use n0_error::{e, ensure, stack_error}; use url::Url; /// The DNS name for the iroh TXT record. pub const IROH_TXT_NAME: &str = "_iroh"; -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] #[non_exhaustive] -#[snafu(visibility(pub(crate)))] pub enum EncodingError { - #[snafu(transparent)] + #[error(transparent)] FailedBuildingPacket { + #[error(std_err)] source: pkarr::errors::SignedPacketBuildError, }, - #[snafu(display("invalid TXT entry"))] - InvalidTxtEntry { source: pkarr::dns::SimpleDnsError }, + #[error("invalid TXT entry")] + InvalidTxtEntry { + #[error(std_err)] + source: pkarr::dns::SimpleDnsError, + }, } -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] #[non_exhaustive] -#[snafu(visibility(pub(crate)))] pub enum DecodingError { - #[snafu(display("endpoint id was not encoded in valid z32"))] - InvalidEncodingZ32 { source: z32::Z32Error }, - #[snafu(display("length must be 32 bytes, but got {len} byte(s)"))] - InvalidLength { len: usize }, - #[snafu(display("endpoint id is not a valid public key"))] - InvalidKey { - #[snafu(source(from(KeyParsingError, Box::new)))] - source: Box, + #[error("endpoint id was not encoded in valid z32")] + InvalidEncodingZ32 { + #[error(std_err)] + source: z32::Z32Error, }, + #[error("length must be 32 bytes, but got {len} byte(s)")] + InvalidLength { len: usize }, + #[error("endpoint id is not a valid public key")] + InvalidKey { source: KeyParsingError }, } /// Extension methods for [`EndpointId`] to encode to and decode from [`z32`], @@ -107,11 +98,13 @@ impl EndpointIdExt for EndpointId { } fn from_z32(s: &str) -> Result { - let bytes = z32::decode(s.as_bytes()).context(InvalidEncodingZ32Snafu)?; + let bytes = + z32::decode(s.as_bytes()).map_err(|err| e!(DecodingError::InvalidEncodingZ32, err))?; let bytes: &[u8; 32] = &bytes .try_into() - .map_err(|_| InvalidLengthSnafu { len: s.len() }.build())?; - let endpoint_id = EndpointId::from_bytes(bytes).context(InvalidKeySnafu)?; + .map_err(|_| e!(DecodingError::InvalidLength { len: s.len() }))?; + let endpoint_id = 
+ EndpointId::from_bytes(bytes).map_err(|err| e!(DecodingError::InvalidKey, err))?; Ok(endpoint_id) } } @@ -250,18 +243,15 @@ impl UserData { /// Error returned when an input value is too long for [`UserData`]. #[allow(missing_docs)] -#[derive(Debug, Snafu)] -pub struct MaxLengthExceededError { - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -} +#[stack_error(derive, add_meta)] +#[error("max length exceeded")] +pub struct MaxLengthExceededError {} impl TryFrom for UserData { type Error = MaxLengthExceededError; fn try_from(value: String) -> Result { - snafu::ensure!(value.len() <= Self::MAX_LENGTH, MaxLengthExceededSnafu); + ensure!(value.len() <= Self::MAX_LENGTH, MaxLengthExceededError); Ok(Self(value)) } } @@ -270,7 +260,7 @@ impl FromStr for UserData { type Err = MaxLengthExceededError; fn from_str(s: &str) -> std::result::Result { - snafu::ensure!(s.len() <= Self::MAX_LENGTH, MaxLengthExceededSnafu); + ensure!(s.len() <= Self::MAX_LENGTH, MaxLengthExceededError); Ok(Self(s.to_string())) } } @@ -432,31 +422,25 @@ impl EndpointInfo { } } -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources)] #[non_exhaustive] -#[snafu(visibility(pub(crate)))] pub enum ParseError { - #[snafu(display("Expected format `key=value`, received `{s}`"))] + #[error("Expected format `key=value`, received `{s}`")] UnexpectedFormat { s: String }, - #[snafu(display("Could not convert key to Attr"))] + #[error("Could not convert key to Attr")] AttrFromString { key: String }, - #[snafu(display("Expected 2 labels, received {num_labels}"))] + #[error("Expected 2 labels, received {num_labels}")] NumLabels { num_labels: usize }, - #[snafu(display("Could not parse labels"))] - Utf8 { source: Utf8Error }, - #[snafu(display("Record is not an `iroh` record, expected `_iroh`, got `{label}`"))] - NotAnIrohRecord { label: String }, - #[snafu(transparent)] - DecodingError { - #[snafu(source(from(DecodingError, Box::new)))] - source: Box, + #[error("Could not parse labels")] + Utf8 { + #[error(std_err)] + source: Utf8Error, }, + #[error("Record is not an `iroh` record, expected `_iroh`, got `{label}`")] + NotAnIrohRecord { label: String }, + #[error(transparent)] + DecodingError { source: DecodingError }, } impl std::ops::Deref for EndpointInfo { @@ -481,12 +465,14 @@ impl std::ops::DerefMut for EndpointInfo { fn endpoint_id_from_txt_name(name: &str) -> Result { let num_labels = name.split(".").count(); if num_labels < 2 { - return Err(NumLabelsSnafu { num_labels }.build()); + return Err(e!(ParseError::NumLabels { num_labels })); } let mut labels = name.split("."); let label = labels.next().expect("checked above"); if label != IROH_TXT_NAME { - return Err(NotAnIrohRecordSnafu { label }.build()); + return Err(e!(ParseError::NotAnIrohRecord { + label: label.to_string() + })); } let label = labels.next().expect("checked above"); let endpoint_id = EndpointId::from_z32(label)?; @@ -562,9 +548,13 @@ impl TxtAttrs { for s in strings { let mut parts = s.split('='); let (Some(key), Some(value)) = (parts.next(), parts.next()) else { - return Err(UnexpectedFormatSnafu { s }.build()); + return Err(e!(ParseError::UnexpectedFormat { s })); }; - let attr = T::from_str(key).map_err(|_| AttrFromStringSnafu { key }.build())?; + let attr = T::from_str(key).map_err(|_| { + e!(ParseError::AttrFromString { + key: key.to_string() + }) + })?; 
attrs.entry(attr).or_default().push(value.to_string()); } Ok(Self { attrs, endpoint_id }) @@ -640,10 +630,13 @@ impl TxtAttrs { let mut builder = pkarr::SignedPacket::builder(); for s in self.to_txt_strings() { let mut txt = rdata::TXT::new(); - txt.add_string(&s).context(InvalidTxtEntrySnafu)?; + txt.add_string(&s) + .map_err(|err| e!(EncodingError::InvalidTxtEntry, err))?; builder = builder.txt(name.clone(), txt.into_owned(), ttl); } - let signed_packet = builder.build(&keypair)?; + let signed_packet = builder + .build(&keypair) + .map_err(|err| e!(EncodingError::FailedBuildingPacket, err))?; Ok(signed_packet) } } @@ -679,7 +672,7 @@ mod tests { }, }; use iroh_base::{EndpointId, SecretKey, TransportAddr}; - use n0_snafu::{Result, ResultExt}; + use n0_error::{Result, StdResultExt}; use super::{EndpointData, EndpointIdExt, EndpointInfo}; use crate::dns::TxtRecordData; @@ -726,7 +719,7 @@ mod tests { let name = Name::from_utf8( "_iroh.dgjpkxyn3zyrk3zfads5duwdgbqpkwbjxfj4yt7rezidr3fijccy.dns.iroh.link.", ) - .context("dns name")?; + .std_context("dns name")?; let query = Query::query(name.clone(), RecordType::TXT); let records = [ Record::from_rdata( @@ -751,7 +744,7 @@ mod tests { )? .to_z32() )) - .context("name")?, + .std_context("name")?, 30, RData::TXT(TXT::new(vec![ "relay=https://euw1-1.relay.iroh.network./".to_string(), @@ -759,7 +752,7 @@ mod tests { ), // Test a record with a completely different name Record::from_rdata( - Name::from_utf8("dns.iroh.link.").context("name")?, + Name::from_utf8("dns.iroh.link.").std_context("name")?, 30, RData::TXT(TXT::new(vec![ "relay=https://euw1-1.relay.iroh.network./".to_string(), diff --git a/iroh-relay/src/main.rs b/iroh-relay/src/main.rs index ef68a6e855d..5f558dbaa15 100644 --- a/iroh-relay/src/main.rs +++ b/iroh-relay/src/main.rs @@ -19,10 +19,9 @@ use iroh_relay::{ }, server::{self as relay, ClientRateLimit, QuicConfig}, }; +use n0_error::{AnyError as Error, Result, StdResultExt, bail_any}; use n0_future::FutureExt; -use n0_snafu::{Error, Result, ResultExt}; use serde::{Deserialize, Serialize}; -use snafu::whatever; use tokio_rustls_acme::{AcmeConfig, caches::DirCache}; use tracing::{debug, warn}; use tracing_subscriber::{EnvFilter, prelude::*}; @@ -62,11 +61,11 @@ enum CertMode { fn load_certs( filename: impl AsRef, ) -> Result>> { - let certfile = std::fs::File::open(filename).context("cannot open certificate file")?; + let certfile = std::fs::File::open(filename).std_context("cannot open certificate file")?; let mut reader = std::io::BufReader::new(certfile); let certs: Result, std::io::Error> = rustls_pemfile::certs(&mut reader).collect(); - let certs = certs.context("reading cert")?; + let certs = certs.std_context("reading cert")?; Ok(certs) } @@ -76,11 +75,13 @@ fn load_secret_key( ) -> Result> { let filename = filename.as_ref(); let keyfile = std::fs::File::open(filename) - .with_context(|| format!("cannot open secret key file {}", filename.display()))?; + .with_std_context(|_| format!("cannot open secret key file {}", filename.display()))?; let mut reader = std::io::BufReader::new(keyfile); loop { - match rustls_pemfile::read_one(&mut reader).context("cannot parse secret key .pem file")? { + match rustls_pemfile::read_one(&mut reader) + .std_context("cannot parse secret key .pem file")? 
+ { Some(rustls_pemfile::Item::Pkcs1Key(key)) => { return Ok(rustls::pki_types::PrivateKeyDer::Pkcs1(key)); } @@ -95,7 +96,7 @@ fn load_secret_key( } } - whatever!( + bail_any!( "no keys found in {} (encrypted keys not supported)", filename.display() ); @@ -292,14 +293,14 @@ async fn http_access_check_inner( match request.send().await { Err(err) => { warn!("Failed to retrieve response for HTTP access check: {err:#}"); - Err(err).context("Failed to fetch response") + Err(err).std_context("Failed to fetch response") } Ok(res) if res.status() == StatusCode::OK => match res.text().await { Ok(text) if text == "true" => Ok(()), - Ok(_) => whatever!("Invalid response text (must be 'true')"), - Err(err) => Err(err).context("Failed to read response"), + Ok(_) => bail_any!("Invalid response text (must be 'true')"), + Err(err) => Err(err).std_context("Failed to read response"), }, - Ok(res) => whatever!("Received invalid status code ({})", res.status()), + Ok(res) => bail_any!("Received invalid status code ({})", res.status()), } } @@ -499,16 +500,16 @@ impl Config { } fn from_str(config: &str) -> Result { - toml::from_str(config).context("config must be valid toml") + toml::from_str(config).std_context("config must be valid toml") } async fn read_from_file(path: impl AsRef) -> Result { if !path.as_ref().is_file() { - whatever!("config-path must be a file"); + bail_any!("config-path must be a file"); } let config_ser = tokio::fs::read_to_string(&path) .await - .context("unable to read config")?; + .std_context("unable to read config")?; Self::from_str(&config_ser) } } @@ -523,7 +524,7 @@ async fn main() -> Result<()> { let cli = Cli::parse(); let mut cfg = Config::load(&cli).await?; if cfg.enable_quic_addr_discovery && cfg.tls.is_none() { - whatever!("TLS must be configured in order to spawn a QUIC endpoint"); + bail_any!("TLS must be configured in order to spawn a QUIC endpoint"); } if cli.dev { // When in `--dev` mode, do not use https, even when tls is configured. 
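In binary code such as `main.rs`, the PR swaps `n0-snafu`'s whatever-errors for `n0_error::AnyError`: `whatever!` becomes `bail_any!`, and `.context(...)` / `.with_context(...)` on standard-library errors become `.std_context(...)` / `.with_std_context(...)` from `StdResultExt`. A small sketch of the resulting style, assuming those items behave as they are used in this file (the config field and messages are made up):

```rust
use n0_error::{Result, StdResultExt, bail_any};

/// Hypothetical config loader in the style of iroh-relay's `main.rs`.
fn load_limit(path: &str) -> Result<u32> {
    // Any `std::error::Error` can be wrapped with a message via `std_context`.
    let raw = std::fs::read_to_string(path).std_context("unable to read config")?;
    let limit: u64 = raw
        .trim()
        .parse()
        .with_std_context(|_| format!("invalid limit in {path}"))?;
    if limit == 0 {
        // `bail_any!` replaces snafu's `whatever!` for ad-hoc failures.
        bail_any!("limit must be non-zero");
    }
    u32::try_from(limit).std_context("limit must fit in a u32")
}
```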
@@ -535,7 +536,7 @@ async fn main() -> Result<()> { } } if cfg.tls.is_none() && cfg.enable_quic_addr_discovery { - whatever!("If QUIC address discovery is enabled, TLS must also be configured"); + bail_any!("If QUIC address discovery is enabled, TLS must also be configured"); }; let relay_config = build_relay_config(cfg).await?; debug!("{relay_config:#?}"); @@ -575,21 +576,21 @@ async fn maybe_load_tls( Ok::<_, Error>((key, certs)) }) .await - .context("join")??; + .std_context("join")??; let server_config = server_config .with_single_cert(certs.clone(), private_key) - .context("tls config")?; + .std_context("tls config")?; (relay::CertConfig::Manual { certs }, server_config) } CertMode::LetsEncrypt => { let hostname = tls .hostname .clone() - .context("LetsEncrypt needs a hostname")?; + .std_context("LetsEncrypt needs a hostname")?; let contact = tls .contact .clone() - .context("LetsEncrypt needs a contact email")?; + .std_context("LetsEncrypt needs a contact email")?; let config = AcmeConfig::new(vec![hostname.clone()]) .contact([format!("mailto:{contact}")]) .cache_option(Some(DirCache::new(tls.cert_dir()))) @@ -631,7 +632,7 @@ async fn maybe_load_tls( let resolver = Arc::new( relay::ReloadingResolver::init(loader, interval) .await - .context("cert loading")?, + .std_context("cert loading")?, ); let server_config = server_config.with_cert_resolver(resolver); (relay::CertConfig::Reloading, server_config) @@ -661,7 +662,7 @@ async fn build_relay_config(cfg: Config) -> Result Result { if rx.bytes_per_second.is_none() && rx.max_burst_bytes.is_some() { - whatever!("bytes_per_seconds must be specified to enable the rate-limiter"); + bail_any!("bytes_per_seconds must be specified to enable the rate-limiter"); } match rx.bytes_per_second { Some(bps) => Some(ClientRateLimit { bytes_per_second: TryInto::::try_into(bps) - .context("bytes_per_second must be non-zero u32")?, + .std_context("bytes_per_second must be non-zero u32")?, max_burst_bytes: rx .max_burst_bytes .map(|v| { TryInto::::try_into(v) - .context("max_burst_bytes must be non-zero u32") + .std_context("max_burst_bytes must be non-zero u32") }) .transpose()?, }), @@ -725,7 +726,7 @@ mod tests { use std::num::NonZeroU32; use iroh_base::SecretKey; - use n0_snafu::Result; + use n0_error::Result; use rand::SeedableRng; use rand_chacha::ChaCha8Rng; diff --git a/iroh-relay/src/protos/common.rs b/iroh-relay/src/protos/common.rs index 2434d112d1e..45750178d01 100644 --- a/iroh-relay/src/protos/common.rs +++ b/iroh-relay/src/protos/common.rs @@ -4,9 +4,11 @@ //! integers for different frames. 
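Beyond the enum declarations, the migration replaces snafu's generated context selectors (`FooSnafu.build()`, `.context(FooSnafu)`, `OptionExt::context`) with the `e!` macro, as in the `tls.rs` and `dns.rs` hunks above: `e!(Error::Variant { field })` builds a variant from its fields, `e!(Error::Variant, err)` additionally attaches `err` as the variant's `source`, and `Option` values go through `.ok_or_else(|| e!(...))`. A condensed sketch of those call shapes with a hypothetical `HostError`:

```rust
use std::net::SocketAddr;

use n0_error::{e, stack_error};

/// Hypothetical error used only to demonstrate the `e!` call shapes.
#[stack_error(derive, add_meta)]
#[non_exhaustive]
pub enum HostError {
    #[error("missing address for {url}")]
    MissingAddr { url: String },
    #[error("could not parse address")]
    ParseAddr {
        #[error(std_err)]
        source: std::net::AddrParseError,
    },
}

fn parse_host(url: &str, addr: Option<&str>) -> Result<SocketAddr, HostError> {
    // Option -> error: replaces snafu's `.context(MissingAddrSnafu)` on an `Option`.
    let addr = addr.ok_or_else(|| {
        e!(HostError::MissingAddr {
            url: url.to_string()
        })
    })?;
    // Foreign error -> variant with a source: replaces `.context(ParseAddrSnafu)`.
    addr.parse().map_err(|err| e!(HostError::ParseAddr, err))
}
```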
use bytes::{Buf, BufMut}; -use nested_enum_utils::common_fields; -use quinn_proto::{VarInt, coding::Codec}; -use snafu::{Backtrace, OptionExt, Snafu}; +use n0_error::{e, stack_error}; +use quinn_proto::{ + VarInt, + coding::{Codec, UnexpectedEnd}, +}; /// Possible frame types during handshaking #[repr(u32)] @@ -53,18 +55,16 @@ pub enum FrameType { Restarting = 12, } -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] +#[stack_error(derive, add_meta)] #[allow(missing_docs)] -#[derive(Debug, Snafu)] #[non_exhaustive] pub enum FrameTypeError { - #[snafu(display("not enough bytes to parse frame type"))] - UnexpectedEnd {}, - #[snafu(display("frame type unknown"))] + #[error("not enough bytes to parse frame type")] + UnexpectedEnd { + #[error(std_err)] + source: UnexpectedEnd, + }, + #[error("frame type unknown")] UnknownFrameType { tag: VarInt }, } @@ -93,13 +93,11 @@ impl FrameType { /// Parses the frame type (as a QUIC-encoded varint) from the first couple of bytes given /// and returns the frame type and the rest. pub(crate) fn from_bytes(buf: &mut impl Buf) -> Result { - let tag = VarInt::decode(buf).ok().context(UnexpectedEndSnafu)?; + let tag = VarInt::decode(buf).map_err(|err| e!(FrameTypeError::UnexpectedEnd, err))?; let tag_u32 = u32::try_from(u64::from(tag)) - .ok() - .context(UnknownFrameTypeSnafu { tag })?; + .map_err(|_| e!(FrameTypeError::UnknownFrameType { tag }))?; let frame_type = FrameType::try_from(tag_u32) - .ok() - .context(UnknownFrameTypeSnafu { tag })?; + .map_err(|_| e!(FrameTypeError::UnknownFrameType { tag }))?; Ok(frame_type) } } diff --git a/iroh-relay/src/protos/handshake.rs b/iroh-relay/src/protos/handshake.rs index 315ba197219..e3be23081ee 100644 --- a/iroh-relay/src/protos/handshake.rs +++ b/iroh-relay/src/protos/handshake.rs @@ -31,11 +31,10 @@ use http::HeaderValue; #[cfg(feature = "server")] use iroh_base::Signature; use iroh_base::{PublicKey, SecretKey}; +use n0_error::{e, ensure, stack_error}; use n0_future::{SinkExt, TryStreamExt}; -use nested_enum_utils::common_fields; #[cfg(feature = "server")] use rand::CryptoRng; -use snafu::{Backtrace, ResultExt, Snafu}; use tracing::trace; use super::{ @@ -133,36 +132,37 @@ impl Frame for ServerDeniesAuth { const TAG: FrameType = FrameType::ServerDeniesAuth; } -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] +#[stack_error(derive, add_meta)] #[allow(missing_docs)] -#[derive(Debug, Snafu)] #[non_exhaustive] pub enum Error { - #[snafu(transparent)] + #[error(transparent)] Websocket { #[cfg(not(wasm_browser))] + #[error(from, std_err)] source: tokio_websockets::Error, #[cfg(wasm_browser)] + #[error(from, std_err)] source: ws_stream_wasm::WsErr, }, - #[snafu(display("Handshake stream ended prematurely"))] + #[error("Handshake stream ended prematurely")] UnexpectedEnd {}, - #[snafu(transparent)] - FrameTypeError { source: FrameTypeError }, - #[snafu(display("The relay denied our authentication ({reason})"))] + #[error(transparent)] + FrameTypeError { + #[error(from)] + source: FrameTypeError, + }, + #[error("The relay denied our authentication ({reason})")] ServerDeniedAuth { reason: String }, - #[snafu(display("Unexpected tag, got {frame_type:?}, but expected one of {expected_types:?}"))] + #[error("Unexpected tag, got {frame_type:?}, but expected one of {expected_types:?}")] UnexpectedFrameType { frame_type: FrameType, expected_types: Vec, }, - #[snafu(display("Handshake failed while deserializing {frame_type:?} 
frame"))] + #[error("Handshake failed while deserializing {frame_type:?} frame")] DeserializationError { frame_type: FrameType, + #[error(std_err)] source: postcard::Error, }, #[cfg(feature = "server")] @@ -171,20 +171,20 @@ pub enum Error { } #[cfg(feature = "server")] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] pub(crate) enum VerificationError { - #[snafu(display("Couldn't export TLS keying material on our end"))] + #[error("Couldn't export TLS keying material on our end")] NoKeyingMaterial, - #[snafu(display( + #[error( "Client didn't extract the same keying material, the suffix mismatched: expected {expected:X?} but got {actual:X?}" - ))] + )] MismatchedSuffix { expected: [u8; 16], actual: [u8; 16], }, - #[snafu(display( + #[error( "Client signature {signature:X?} for message {message:X?} invalid for public key {public_key}" - ))] + )] SignatureInvalid { source: iroh_base::SignatureError, message: Vec, @@ -231,10 +231,13 @@ impl ClientAuth { let message = challenge.message_to_sign(); self.public_key .verify(&message, &Signature::from_bytes(&self.signature)) - .with_context(|_| SignatureInvalidSnafu { - message: message.to_vec(), - signature: self.signature, - public_key: self.public_key, + .map_err(|err| { + e!(VerificationError::SignatureInvalid { + source: err, + message: message.to_vec(), + signature: self.signature, + public_key: self.public_key + }) }) .map_err(Box::new) } @@ -282,15 +285,13 @@ impl KeyMaterialClientAuth { &self, io: &impl ExportKeyingMaterial, ) -> Result<(), Box> { - use snafu::OptionExt; - let key_material = io .export_keying_material( [0u8; 32], DOMAIN_SEP_TLS_EXPORT_LABEL, Some(self.public_key.as_bytes()), ) - .context(NoKeyingMaterialSnafu)?; + .ok_or_else(|| e!(VerificationError::NoKeyingMaterial))?; // We split the export and only sign the first 16 bytes, and // pass through the last 16 bytes. // Passing on the suffix helps the verifying end figure out what @@ -301,9 +302,9 @@ impl KeyMaterialClientAuth { // there must be something wrong with the client's secret key or signature. let (message, suffix) = key_material.split_at(16); let suffix: [u8; 16] = suffix.try_into().expect("hardcoded length"); - snafu::ensure!( + ensure!( suffix == self.key_material_suffix, - MismatchedSuffixSnafu { + VerificationError::MismatchedSuffix { expected: self.key_material_suffix, actual: suffix } @@ -313,10 +314,13 @@ impl KeyMaterialClientAuth { // the TLS export keying material above. 
self.public_key .verify(message, &Signature::from_bytes(&self.signature)) - .with_context(|_| SignatureInvalidSnafu { - message: message.to_vec(), - public_key: self.public_key, - signature: self.signature, + .map_err(|err| { + e!(VerificationError::SignatureInvalid { + source: err, + message: message.to_vec(), + public_key: self.public_key, + signature: self.signature + }) }) .map_err(Box::new) } @@ -353,10 +357,9 @@ pub(crate) async fn clientside( } FrameType::ServerDeniesAuth => { let denial: ServerDeniesAuth = deserialize_frame(frame)?; - Err(ServerDeniedAuthSnafu { - reason: denial.reason, - } - .build()) + Err(e!(Error::ServerDeniedAuth { + reason: denial.reason + })) } _ => unreachable!(), } @@ -407,18 +410,16 @@ pub(crate) async fn serverside( let client_auth_bytes = data_encoding::BASE64URL_NOPAD .decode(client_auth_header.as_ref()) .map_err(|_| { - ClientAuthHeaderInvalidSnafu { - value: client_auth_header.clone(), - } - .build() + e!(Error::ClientAuthHeaderInvalid { + value: client_auth_header.clone() + }) })?; let client_auth: KeyMaterialClientAuth = postcard::from_bytes(&client_auth_bytes).map_err(|_| { - ClientAuthHeaderInvalidSnafu { - value: client_auth_header.clone(), - } - .build() + e!(Error::ClientAuthHeaderInvalid { + value: client_auth_header.clone() + }) })?; if client_auth.verify(io).is_ok() { @@ -444,10 +445,9 @@ pub(crate) async fn serverside( reason: "signature invalid".into(), }; write_frame(io, denial.clone()).await?; - ServerDeniedAuthSnafu { - reason: denial.reason, - } - .fail() + Err(e!(Error::ServerDeniedAuth { + reason: denial.reason + })) } else { trace!(?client_auth.public_key, "authentication succeeded via challenge"); Ok(SuccessfulAuthentication { @@ -474,10 +474,9 @@ impl SuccessfulAuthentication { reason: "not authorized".into(), }; write_frame(io, denial.clone()).await?; - ServerDeniedAuthSnafu { - reason: denial.reason, - } - .fail() + Err(e!(Error::ServerDeniedAuth { + reason: denial.reason + })) } } } @@ -505,13 +504,13 @@ async fn read_frame( let mut payload = io .try_next() .await? 
- .ok_or_else(|| UnexpectedEndSnafu.build())?; + .ok_or_else(|| e!(Error::UnexpectedEnd))?; let frame_type = FrameType::from_bytes(&mut payload)?; trace!(?frame_type, "Reading frame"); - snafu::ensure!( + ensure!( expected_types.contains(&frame_type), - UnexpectedFrameTypeSnafu { + Error::UnexpectedFrameType { frame_type, expected_types: expected_types.to_vec() } @@ -521,15 +520,20 @@ async fn read_frame( } fn deserialize_frame(frame: Bytes) -> Result { - postcard::from_bytes(&frame).context(DeserializationSnafu { frame_type: F::TAG }) + postcard::from_bytes(&frame).map_err(|err| { + e!(Error::DeserializationError { + frame_type: F::TAG, + source: err + }) + }) } #[cfg(all(test, feature = "server"))] mod tests { use bytes::BytesMut; use iroh_base::{PublicKey, SecretKey}; + use n0_error::{Result, StackResultExt, StdResultExt}; use n0_future::{Sink, SinkExt, Stream, TryStreamExt}; - use n0_snafu::{Result, ResultExt}; use rand::SeedableRng; use tokio_util::codec::{Framed, LengthDelimitedCodec}; use tracing::{Instrument, info_span}; @@ -765,8 +769,8 @@ mod tests { let challenge = ServerChallenge::new(&mut rng); let client_auth = ClientAuth::new(&secret_key, &challenge); - let bytes = postcard::to_allocvec(&client_auth).e()?; - let decoded: ClientAuth = postcard::from_bytes(&bytes).e()?; + let bytes = postcard::to_allocvec(&client_auth).anyerr()?; + let decoded: ClientAuth = postcard::from_bytes(&bytes).anyerr()?; assert_eq!(client_auth.public_key, decoded.public_key); assert_eq!(client_auth.signature, decoded.signature); @@ -785,10 +789,10 @@ mod tests { shared_secret: Some(42), }, ) - .e()?; + .anyerr()?; - let bytes = postcard::to_allocvec(&client_auth).e()?; - let decoded: KeyMaterialClientAuth = postcard::from_bytes(&bytes).e()?; + let bytes = postcard::to_allocvec(&client_auth).anyerr()?; + let decoded: KeyMaterialClientAuth = postcard::from_bytes(&bytes).anyerr()?; assert_eq!(client_auth.public_key, decoded.public_key); assert_eq!(client_auth.signature, decoded.signature); @@ -815,7 +819,7 @@ mod tests { inner: (), shared_secret: Some(42), }; - let client_auth = KeyMaterialClientAuth::new(&secret_key, &io).e()?; + let client_auth = KeyMaterialClientAuth::new(&secret_key, &io).anyerr()?; assert!(client_auth.verify(&io).is_ok()); Ok(()) diff --git a/iroh-relay/src/protos/relay.rs b/iroh-relay/src/protos/relay.rs index 26e60e0e669..991f4472f54 100644 --- a/iroh-relay/src/protos/relay.rs +++ b/iroh-relay/src/protos/relay.rs @@ -11,9 +11,8 @@ use std::num::NonZeroU16; use bytes::{Buf, BufMut, Bytes, BytesMut}; use iroh_base::{EndpointId, KeyParsingError}; +use n0_error::{e, ensure, stack_error}; use n0_future::time::Duration; -use nested_enum_utils::common_fields; -use snafu::{Backtrace, ResultExt, Snafu}; use super::common::{FrameType, FrameTypeError}; use crate::KeyCache; @@ -41,32 +40,33 @@ pub(crate) const PING_INTERVAL: Duration = Duration::from_secs(15); pub(crate) const PER_CLIENT_SEND_QUEUE_DEPTH: usize = 512; /// Protocol send errors. 
-#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] +#[stack_error(derive, add_meta, from_sources)] #[allow(missing_docs)] -#[derive(Debug, Snafu)] #[non_exhaustive] pub enum Error { - #[snafu(display("unexpected frame: got {got:?}, expected {expected:?}"))] + #[error("unexpected frame: got {got:?}, expected {expected:?}")] UnexpectedFrame { got: FrameType, expected: FrameType }, - #[snafu(display("Frame is too large, has {frame_len} bytes"))] + #[error("Frame is too large, has {frame_len} bytes")] FrameTooLarge { frame_len: usize }, - #[snafu(transparent)] - SerDe { source: postcard::Error }, - #[snafu(transparent)] + #[error(transparent)] + SerDe { + #[error(std_err)] + source: postcard::Error, + }, + #[error(transparent)] FrameTypeError { source: FrameTypeError }, - #[snafu(display("Invalid public key"))] + #[error("Invalid public key")] InvalidPublicKey { source: KeyParsingError }, - #[snafu(display("Invalid frame encoding"))] + #[error("Invalid frame encoding")] InvalidFrame {}, - #[snafu(display("Invalid frame type: {frame_type:?}"))] + #[error("Invalid frame type: {frame_type:?}")] InvalidFrameType { frame_type: FrameType }, - #[snafu(display("Invalid protocol message encoding"))] - InvalidProtocolMessageEncoding { source: std::str::Utf8Error }, - #[snafu(display("Too few bytes"))] + #[error("Invalid protocol message encoding")] + InvalidProtocolMessageEncoding { + #[error(std_err)] + source: std::str::Utf8Error, + }, + #[error("Too few bytes")] TooSmall {}, } @@ -220,9 +220,9 @@ impl Datagrams { fn from_bytes(mut bytes: Bytes, is_batch: bool) -> Result { if is_batch { // 1 bytes ECN, 2 bytes segment size - snafu::ensure!(bytes.len() >= 3, InvalidFrameSnafu); + ensure!(bytes.len() >= 3, Error::InvalidFrame); } else { - snafu::ensure!(bytes.len() >= 1, InvalidFrameSnafu); + ensure!(bytes.len() >= 1, Error::InvalidFrame); } let ecn_byte = bytes.get_u8(); @@ -329,18 +329,16 @@ impl RelayToClientMsg { pub(crate) fn from_bytes(mut content: Bytes, cache: &KeyCache) -> Result { let frame_type = FrameType::from_bytes(&mut content)?; let frame_len = content.len(); - snafu::ensure!( + ensure!( frame_len <= MAX_PACKET_SIZE, - FrameTooLargeSnafu { frame_len } + Error::FrameTooLarge { frame_len } ); let res = match frame_type { FrameType::RelayToClientDatagram | FrameType::RelayToClientDatagramBatch => { - snafu::ensure!(content.len() >= EndpointId::LENGTH, InvalidFrameSnafu); + ensure!(content.len() >= EndpointId::LENGTH, Error::InvalidFrame); - let remote_endpoint_id = cache - .key_from_slice(&content[..EndpointId::LENGTH]) - .context(InvalidPublicKeySnafu)?; + let remote_endpoint_id = cache.key_from_slice(&content[..EndpointId::LENGTH])?; let datagrams = Datagrams::from_bytes( content.slice(EndpointId::LENGTH..), frame_type == FrameType::RelayToClientDatagramBatch, @@ -351,41 +349,37 @@ impl RelayToClientMsg { } } FrameType::EndpointGone => { - snafu::ensure!(content.len() == EndpointId::LENGTH, InvalidFrameSnafu); - let endpoint_id = cache - .key_from_slice(content.as_ref()) - .context(InvalidPublicKeySnafu)?; + ensure!(content.len() == EndpointId::LENGTH, Error::InvalidFrame); + let endpoint_id = cache.key_from_slice(content.as_ref())?; Self::EndpointGone(endpoint_id) } FrameType::Ping => { - snafu::ensure!(content.len() == 8, InvalidFrameSnafu); + ensure!(content.len() == 8, Error::InvalidFrame); let mut data = [0u8; 8]; data.copy_from_slice(&content[..8]); Self::Ping(data) } FrameType::Pong => { - snafu::ensure!(content.len() == 8, 
InvalidFrameSnafu); + ensure!(content.len() == 8, Error::InvalidFrame); let mut data = [0u8; 8]; data.copy_from_slice(&content[..8]); Self::Pong(data) } FrameType::Health => { - let problem = std::str::from_utf8(&content) - .context(InvalidProtocolMessageEncodingSnafu)? - .to_owned(); + let problem = std::str::from_utf8(&content)?.to_owned(); Self::Health { problem } } FrameType::Restarting => { - snafu::ensure!(content.len() == 4 + 4, InvalidFrameSnafu); + ensure!(content.len() == 4 + 4, Error::InvalidFrame); let reconnect_in = u32::from_be_bytes( content[..4] .try_into() - .map_err(|_| InvalidFrameSnafu.build())?, + .map_err(|_| e!(Error::InvalidFrame))?, ); let try_for = u32::from_be_bytes( content[4..] .try_into() - .map_err(|_| InvalidFrameSnafu.build())?, + .map_err(|_| e!(Error::InvalidFrame))?, ); let reconnect_in = Duration::from_millis(reconnect_in as u64); let try_for = Duration::from_millis(try_for as u64); @@ -395,7 +389,7 @@ impl RelayToClientMsg { } } _ => { - return Err(InvalidFrameTypeSnafu { frame_type }.build()); + return Err(e!(Error::InvalidFrameType { frame_type })); } }; Ok(res) @@ -463,16 +457,14 @@ impl ClientToRelayMsg { pub(crate) fn from_bytes(mut content: Bytes, cache: &KeyCache) -> Result { let frame_type = FrameType::from_bytes(&mut content)?; let frame_len = content.len(); - snafu::ensure!( + ensure!( frame_len <= MAX_PACKET_SIZE, - FrameTooLargeSnafu { frame_len } + Error::FrameTooLarge { frame_len } ); let res = match frame_type { FrameType::ClientToRelayDatagram | FrameType::ClientToRelayDatagramBatch => { - let dst_endpoint_id = cache - .key_from_slice(&content[..EndpointId::LENGTH]) - .context(InvalidPublicKeySnafu)?; + let dst_endpoint_id = cache.key_from_slice(&content[..EndpointId::LENGTH])?; let datagrams = Datagrams::from_bytes( content.slice(EndpointId::LENGTH..), frame_type == FrameType::ClientToRelayDatagramBatch, @@ -483,19 +475,19 @@ impl ClientToRelayMsg { } } FrameType::Ping => { - snafu::ensure!(content.len() == 8, InvalidFrameSnafu); + ensure!(content.len() == 8, Error::InvalidFrame); let mut data = [0u8; 8]; data.copy_from_slice(&content[..8]); Self::Ping(data) } FrameType::Pong => { - snafu::ensure!(content.len() == 8, InvalidFrameSnafu); + ensure!(content.len() == 8, Error::InvalidFrame); let mut data = [0u8; 8]; data.copy_from_slice(&content[..8]); Self::Pong(data) } _ => { - return Err(InvalidFrameTypeSnafu { frame_type }.build()); + return Err(e!(Error::InvalidFrameType { frame_type })); } }; Ok(res) @@ -507,7 +499,7 @@ impl ClientToRelayMsg { mod tests { use data_encoding::HEXLOWER; use iroh_base::SecretKey; - use n0_snafu::Result; + use n0_error::Result; use super::*; diff --git a/iroh-relay/src/quic.rs b/iroh-relay/src/quic.rs index b634802dda0..9b6b7bc3b00 100644 --- a/iroh-relay/src/quic.rs +++ b/iroh-relay/src/quic.rs @@ -2,10 +2,9 @@ //! for QUIC address discovery. 
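The `from_bytes` parsers in `protos/relay.rs` above also show the third macro in the new API: `n0_error::ensure!`, a drop-in replacement for `snafu::ensure!` that names the error variant (or struct) directly instead of a generated `...Snafu` selector. A short sketch of a length check in that style, again with a hypothetical error type:

```rust
use bytes::{Buf, Bytes};
use n0_error::{ensure, stack_error};

/// Hypothetical framing error for the sketch below.
#[stack_error(derive, add_meta)]
#[non_exhaustive]
pub enum FrameError {
    #[error("frame too short: {len} bytes")]
    TooShort { len: usize },
}

/// Reads a big-endian u32 payload length, mirroring the `ensure!` checks above.
fn read_len_prefix(mut bytes: Bytes) -> Result<u32, FrameError> {
    // Bail out with `FrameError::TooShort` unless the condition holds.
    ensure!(bytes.len() >= 4, FrameError::TooShort { len: bytes.len() });
    Ok(bytes.get_u32())
}
```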
use std::{net::SocketAddr, sync::Arc}; +use n0_error::stack_error; use n0_future::time::Duration; -use nested_enum_utils::common_fields; use quinn::{VarInt, crypto::rustls::QuicClientConfig}; -use snafu::{Backtrace, Snafu}; use tokio::sync::watch; /// ALPN for our quic addr discovery @@ -17,11 +16,11 @@ pub const QUIC_ADDR_DISC_CLOSE_REASON: &[u8] = b"finished"; #[cfg(feature = "server")] pub(crate) mod server { + use n0_error::e; use quinn::{ ApplicationClose, ConnectionError, crypto::rustls::{NoInitialCipherSuite, QuicServerConfig}, }; - use snafu::ResultExt; use tokio::task::JoinSet; use tokio_util::{sync::CancellationToken, task::AbortOnDropHandle}; use tracing::{Instrument, debug, info, info_span}; @@ -37,29 +36,23 @@ pub(crate) mod server { /// Server spawn errors #[allow(missing_docs)] - #[derive(Debug, Snafu)] + #[stack_error(derive, add_meta)] #[non_exhaustive] pub enum QuicSpawnError { - #[snafu(transparent)] + #[error(transparent)] NoInitialCipherSuite { + #[error(std_err, from)] source: NoInitialCipherSuite, - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, }, - #[snafu(display("Unable to spawn a QUIC endpoint server"))] + #[error("Unable to spawn a QUIC endpoint server")] EndpointServer { + #[error(std_err)] source: std::io::Error, - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, }, - #[snafu(display("Unable to get the local address from the endpoint"))] + #[error("Unable to get the local address from the endpoint")] LocalAddr { + #[error(std_err)] source: std::io::Error, - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, }, } @@ -114,8 +107,10 @@ pub(crate) mod server { .send_observed_address_reports(true); let endpoint = quinn::Endpoint::server(server_config, quic_config.bind_addr) - .context(EndpointServerSnafu)?; - let bind_addr = endpoint.local_addr().context(LocalAddrSnafu)?; + .map_err(|err| e!(QuicSpawnError::EndpointServer, err))?; + let bind_addr = endpoint + .local_addr() + .map_err(|err| e!(QuicSpawnError::LocalAddr, err))?; info!(?bind_addr, "QUIC server listening on"); @@ -228,21 +223,25 @@ pub(crate) mod server { } /// Quic client related errors. -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources, std_sources)] #[non_exhaustive] pub enum Error { - #[snafu(transparent)] - Connect { source: quinn::ConnectError }, - #[snafu(transparent)] - Connection { source: quinn::ConnectionError }, - #[snafu(transparent)] - WatchRecv { source: watch::error::RecvError }, + #[error(transparent)] + Connect { + #[error(std_err)] + source: quinn::ConnectError, + }, + #[error(transparent)] + Connection { + #[error(std_err)] + source: quinn::ConnectionError, + }, + #[error(transparent)] + WatchRecv { + #[error(std_err)] + source: watch::error::RecvError, + }, } /// Handles the client side of QUIC address discovery. @@ -313,7 +312,7 @@ impl QuicClient { // tokio::select! 
{ // _ = cancel.cancelled() => { // conn.close(QUIC_ADDR_DISC_CLOSE_CODE, QUIC_ADDR_DISC_CLOSE_REASON); - // bail!("QUIC address discovery canceled early"); + // bail_any!("QUIC address discovery canceled early"); // }, // res = external_addresses.wait_for(|addr| addr.is_some()) => { // let addr = res?.expect("checked"); @@ -358,11 +357,11 @@ impl QuicClient { mod tests { use std::net::Ipv4Addr; + use n0_error::{Result, StdResultExt}; use n0_future::{ task::AbortOnDropHandle, time::{self, Instant}, }; - use n0_snafu::{Error, Result, ResultExt}; use quinn::crypto::rustls::QuicServerConfig; use tracing::{Instrument, debug, info, info_span}; use tracing_test::traced_test; @@ -387,8 +386,8 @@ mod tests { // create a client-side endpoint let client_endpoint = - quinn::Endpoint::client(SocketAddr::new(host.into(), 0)).context("client")?; - let client_addr = client_endpoint.local_addr().context("local addr")?; + quinn::Endpoint::client(SocketAddr::new(host.into(), 0)).std_context("client")?; + let client_addr = client_endpoint.local_addr().std_context("local addr")?; // create the client configuration used for the client endpoint when they // initiate a connection with the server @@ -414,14 +413,14 @@ mod tests { // create a client-side endpoint let client_endpoint = quinn::Endpoint::client(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 0)) - .context("client")?; + .std_context("client")?; // create an socket that does not respond. let server_socket = tokio::net::UdpSocket::bind(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 0)) .await - .context("bind")?; - let server_addr = server_socket.local_addr().context("local addr")?; + .std_context("bind")?; + let server_addr = server_socket.local_addr().std_context("local addr")?; // create the client configuration used for the client endpoint when they // initiate a connection with the server @@ -465,22 +464,22 @@ mod tests { // need to pop off messages before we attach it to the Quinn Endpoint. let socket = tokio::net::UdpSocket::bind(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 0)) .await - .context("bind")?; - let server_addr = socket.local_addr().context("local addr")?; + .std_context("bind")?; + let server_addr = socket.local_addr().std_context("local addr")?; info!(addr = ?server_addr, "server socket bound"); // Create a QAD server with a self-signed cert, all manually. 
- let cert = - rcgen::generate_simple_self_signed(vec!["localhost".into()]).context("self signed")?; + let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]) + .std_context("self signed")?; let key = PrivatePkcs8KeyDer::from(cert.signing_key.serialize_der()); let mut server_crypto = rustls::ServerConfig::builder() .with_no_client_auth() .with_single_cert(vec![cert.cert.into()], key.into()) - .context("tls")?; + .std_context("tls")?; server_crypto.key_log = Arc::new(rustls::KeyLogFile::new()); server_crypto.alpn_protocols = vec![ALPN_QUIC_ADDR_DISC.to_vec()]; let mut server_config = quinn::ServerConfig::with_crypto(Arc::new( - QuicServerConfig::try_from(server_crypto).context("config")?, + QuicServerConfig::try_from(server_crypto).std_context("config")?, )); let transport_config = Arc::get_mut(&mut server_config.transport).unwrap(); transport_config.send_observed_address_reports(true); @@ -505,14 +504,14 @@ mod tests { socket.into_std().unwrap(), Arc::new(quinn::TokioRuntime), ) - .context("endpoint new")?; + .std_context("endpoint new")?; info!("accepting conn"); let incoming = server.accept().await.expect("missing conn"); info!("incoming!"); - let conn = incoming.await.context("incoming")?; + let conn = incoming.await.std_context("incoming")?; conn.closed().await; server.wait_idle().await; - Ok::<_, Error>(()) + n0_error::Ok(()) } .instrument(info_span!("server")), ); @@ -521,7 +520,7 @@ mod tests { info!("starting client"); let client_endpoint = quinn::Endpoint::client(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 0)) - .context("client")?; + .std_context("client")?; // create the client configuration used for the client endpoint when they // initiate a connection with the server @@ -535,15 +534,15 @@ mod tests { quic_client.get_addr_and_latency(server_addr, "localhost"), ) .await - .context("timeout")??; + .std_context("timeout")??; let duration = start.elapsed(); info!(?duration, ?addr, ?latency, "QAD succeeded"); assert!(duration >= Duration::from_secs(1)); time::timeout(Duration::from_secs(10), server_task) .await - .context("timeout")? - .context("server task")??; + .std_context("timeout")? 
+ .std_context("server task")??; Ok(()) } diff --git a/iroh-relay/src/server.rs b/iroh-relay/src/server.rs index 034ac46cac7..f3b470b8a3c 100644 --- a/iroh-relay/src/server.rs +++ b/iroh-relay/src/server.rs @@ -26,9 +26,8 @@ use hyper::body::Incoming; use iroh_base::EndpointId; #[cfg(feature = "test-utils")] use iroh_base::RelayUrl; +use n0_error::{e, stack_error}; use n0_future::{StreamExt, future::Boxed}; -use nested_enum_utils::common_fields; -use snafu::{Backtrace, ResultExt, Snafu}; use tokio::{ net::TcpListener, task::{JoinError, JoinSet}, @@ -265,48 +264,47 @@ pub struct Server { } /// Server spawn errors -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, std_sources)] #[non_exhaustive] pub enum SpawnError { - #[snafu(display("Unable to get local address"))] + #[error("Unable to get local address")] LocalAddr { source: std::io::Error }, - #[snafu(display("Failed to bind QAD listener"))] + #[error("Failed to bind QAD listener")] QuicSpawn { source: QuicSpawnError }, - #[snafu(display("Failed to parse TLS header"))] + #[error("Failed to parse TLS header")] TlsHeaderParse { source: InvalidHeaderValue }, - #[snafu(display("Failed to bind TcpListener"))] + #[error("Failed to bind TcpListener")] BindTlsListener { source: std::io::Error }, - #[snafu(display("No local address"))] + #[error("No local address")] NoLocalAddr { source: std::io::Error }, - #[snafu(display("Failed to bind server socket to {addr}"))] - BindTcpListener { addr: SocketAddr }, + #[error("Failed to bind server socket to {addr}")] + BindTcpListener { + source: std::io::Error, + addr: SocketAddr, + }, } /// Server task errors -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] #[non_exhaustive] pub enum SupervisorError { - #[snafu(display("Error starting metrics server"))] - Metrics { source: std::io::Error }, - #[snafu(display("Acme event stream finished"))] + #[error("Error starting metrics server")] + Metrics { + #[error(std_err)] + source: std::io::Error, + }, + #[error("Acme event stream finished")] AcmeEventStreamFinished {}, - #[snafu(transparent)] - JoinError { source: JoinError }, - #[snafu(display("No relay services are enabled"))] + #[error(transparent)] + JoinError { + #[error(from, std_err)] + source: JoinError, + }, + #[error("No relay services are enabled")] NoRelayServicesEnabled {}, - #[snafu(display("Task cancelled"))] + #[error("Task cancelled")] TaskCancelled {}, } @@ -330,7 +328,7 @@ impl Server { async move { iroh_metrics::service::start_metrics_server(addr, Arc::new(registry)) .await - .context(MetricsSnafu) + .map_err(|err| e!(SupervisorError::Metrics, err)) } .instrument(info_span!("metrics-server")), ); @@ -348,7 +346,7 @@ impl Server { let quic_server = match config.quic { Some(quic_config) => { debug!("Starting QUIC server {}", quic_config.bind_addr); - Some(QuicServer::spawn(quic_config).context(QuicSpawnSnafu)?) + Some(QuicServer::spawn(quic_config).map_err(|err| e!(SpawnError::QuicSpawn, err))?) 
} None => None, }; @@ -360,7 +358,12 @@ impl Server { debug!("Starting Relay server"); let mut headers = HeaderMap::new(); for (name, value) in TLS_HEADERS.iter() { - headers.insert(*name, value.parse().context(TlsHeaderParseSnafu)?); + headers.insert( + *name, + value + .parse() + .map_err(|err| e!(SpawnError::TlsHeaderParse, err))?, + ); } let relay_bind_addr = match relay_config.tls { Some(ref tls) => tls.https_bind_addr, @@ -395,7 +398,7 @@ impl Server { Err(err) => error!("error: {err:?}"), } } - Err(AcmeEventStreamFinishedSnafu.build()) + Err(e!(SupervisorError::AcmeEventStreamFinished)) } .instrument(info_span!("acme")), ); @@ -421,8 +424,10 @@ impl Server { // these standalone. let http_listener = TcpListener::bind(&relay_config.http_bind_addr) .await - .context(BindTlsListenerSnafu)?; - let http_addr = http_listener.local_addr().context(NoLocalAddrSnafu)?; + .map_err(|err| e!(SpawnError::BindTlsListener, err))?; + let http_addr = http_listener + .local_addr() + .map_err(|err| e!(SpawnError::NoLocalAddr, err))?; tasks.spawn( async move { run_captive_portal_service(http_listener).await; @@ -564,7 +569,7 @@ async fn relay_supervisor( Some(ret) = tasks.join_next() => ret, ret = &mut quic_fut, if quic_enabled => ret.map(Ok), ret = &mut relay_fut, if relay_enabled => ret.map(Ok), - else => Ok(Err(NoRelayServicesEnabledSnafu.build())), + else => Ok(Err(e!(SupervisorError::NoRelayServicesEnabled))), }; let ret = match res { Ok(Ok(())) => { @@ -581,7 +586,7 @@ async fn relay_supervisor( std::panic::resume_unwind(panic); } debug!("Task cancelled"); - Err(TaskCancelledSnafu.build()) + Err(e!(SupervisorError::TaskCancelled)) } }; @@ -750,8 +755,8 @@ mod tests { use http::StatusCode; use iroh_base::{EndpointId, RelayUrl, SecretKey}; + use n0_error::Result; use n0_future::{FutureExt, SinkExt, StreamExt}; - use n0_snafu::Result; use rand::SeedableRng; use tracing::{info, instrument}; use tracing_test::traced_test; diff --git a/iroh-relay/src/server/client.rs b/iroh-relay/src/server/client.rs index 21ec13bb634..b1af242de8e 100644 --- a/iroh-relay/src/server/client.rs +++ b/iroh-relay/src/server/client.rs @@ -3,10 +3,9 @@ use std::{collections::HashSet, sync::Arc, time::Duration}; use iroh_base::EndpointId; +use n0_error::{e, stack_error}; use n0_future::{SinkExt, StreamExt}; -use nested_enum_utils::common_fields; use rand::Rng; -use snafu::{Backtrace, GenerateImplicitData, Snafu}; use time::{Date, OffsetDateTime}; use tokio::{ sync::mpsc::{self, error::TrySendError}, @@ -171,98 +170,67 @@ impl Client { } /// Error for [`Actor::handle_frame`] -#[common_fields({ - backtrace: Option, -})] +#[stack_error(derive, add_meta, from_sources)] #[allow(missing_docs)] -#[derive(Debug, Snafu)] #[non_exhaustive] pub enum HandleFrameError { - #[snafu(transparent)] + #[error(transparent)] ForwardPacket { source: ForwardPacketError }, - #[snafu(display("Stream terminated"))] + #[error("Stream terminated")] StreamTerminated {}, - #[snafu(transparent)] + #[error(transparent)] Recv { source: RelayRecvError }, - #[snafu(transparent)] + #[error(transparent)] Send { source: WriteFrameError }, } /// Error for [`Actor::write_frame`] -#[common_fields({ - backtrace: Option, -})] +#[stack_error(derive, add_meta, from_sources)] #[allow(missing_docs)] -#[derive(Debug, Snafu)] #[non_exhaustive] pub enum WriteFrameError { - #[snafu(transparent)] + #[error(transparent)] Stream { source: RelaySendError }, - #[snafu(transparent)] - Timeout { source: tokio::time::error::Elapsed }, + #[error(transparent)] + Timeout { + 
#[error(std_err)] + source: tokio::time::error::Elapsed, + }, } /// Run error -#[common_fields({ - backtrace: Option, -})] +#[stack_error(derive, add_meta)] #[allow(missing_docs)] -#[derive(Debug, Snafu)] #[non_exhaustive] pub enum RunError { - #[snafu(transparent)] - ForwardPacket { source: ForwardPacketError }, - #[snafu(display("Flush"))] - Flush { - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, - }, - #[snafu(transparent)] - HandleFrame { source: HandleFrameError }, - #[snafu(display("Server.disco_send_queue dropped"))] - DiscoSendQueuePacketDrop { - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, - }, - #[snafu(display("Failed to send disco packet"))] - DiscoPacketSend { - source: WriteFrameError, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, - }, - #[snafu(display("Server.send_queue dropped"))] - SendQueuePacketDrop { - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, - }, - #[snafu(display("Failed to send packet"))] - PacketSend { - source: WriteFrameError, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, - }, - #[snafu(display("Server.endpoint_gone dropped"))] - EndpointGoneDrop { - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, - }, - #[snafu(display("EndpointGone write frame failed"))] - EndpointGoneWriteFrame { - source: WriteFrameError, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, - }, - #[snafu(display("Keep alive write frame failed"))] - KeepAliveWriteFrame { - source: WriteFrameError, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, + #[error(transparent)] + ForwardPacket { + #[error(from)] + source: ForwardPacketError, }, - #[snafu(display("Tick flush"))] - TickFlush { - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, + #[error("Flush")] + Flush {}, + #[error(transparent)] + HandleFrame { + #[error(from)] + source: HandleFrameError, }, + #[error("Server.disco_send_queue dropped")] + DiscoSendQueuePacketDrop {}, + #[error("Failed to send disco packet")] + DiscoPacketSend { source: WriteFrameError }, + #[error("Server.send_queue dropped")] + SendQueuePacketDrop {}, + #[error("Failed to send packet")] + PacketSend { source: WriteFrameError }, + #[error("Server.endpoint_gone dropped")] + EndpointGoneDrop {}, + #[error("EndpointGone write frame failed")] + EndpointGoneWriteFrame { source: WriteFrameError }, + #[error("Keep alive write frame failed")] + KeepAliveWriteFrame { source: WriteFrameError }, + #[error("Tick flush")] + TickFlush {}, } /// Manages all the reads and writes to this client. 
It periodically sends a `KEEP_ALIVE` @@ -330,8 +298,6 @@ impl Actor { } async fn run_inner(&mut self, done: CancellationToken) -> Result<(), RunError> { - use snafu::ResultExt; - // Add some jitter to ping pong interactions, to avoid all pings being sent at the same time let next_interval = || { let random_secs = rand::rng().random_range(1..=5); @@ -350,29 +316,37 @@ impl Actor { _ = done.cancelled() => { trace!("actor loop cancelled, exiting"); // final flush - self.stream.flush().await.map_err(|_| FlushSnafu.build())?; + self.stream.flush().await.map_err(|_| e!(RunError::Flush))?; break; } maybe_frame = self.stream.next() => { - self.handle_frame(maybe_frame).await?; + self + .handle_frame(maybe_frame) + .await?; // reset the ping interval, we just received a message ping_interval.reset(); } // First priority, disco packets packet = self.disco_send_queue.recv() => { - let packet = packet.ok_or(DiscoSendQueuePacketDropSnafu.build())?; - self.send_disco_packet(packet).await.context(DiscoPacketSendSnafu)?; + let packet = packet.ok_or_else(|| e!(RunError::DiscoSendQueuePacketDrop))?; + self.send_disco_packet(packet) + .await + .map_err(|err| e!(RunError::DiscoPacketSend, err))?; } // Second priority, sending regular packets packet = self.send_queue.recv() => { - let packet = packet.ok_or(SendQueuePacketDropSnafu.build())?; - self.send_packet(packet).await.context(PacketSendSnafu)?; + let packet = packet.ok_or_else(|| e!(RunError::SendQueuePacketDrop))?; + self.send_packet(packet) + .await + .map_err(|err| e!(RunError::PacketSend, err))?; } // Last priority, sending left endpoints endpoint_id = self.endpoint_gone.recv() => { - let endpoint_id = endpoint_id.ok_or(EndpointGoneDropSnafu.build())?; + let endpoint_id = endpoint_id.ok_or_else(|| e!(RunError::EndpointGoneDrop))?; trace!("endpoint_id gone: {:?}", endpoint_id); - self.write_frame(RelayToClientMsg::EndpointGone(endpoint_id)).await.context(EndpointGoneWriteFrameSnafu)?; + self.write_frame(RelayToClientMsg::EndpointGone(endpoint_id)) + .await + .map_err(|err| e!(RunError::EndpointGoneWriteFrame, err))?; } _ = self.ping_tracker.timeout() => { trace!("pong timed out"); @@ -383,14 +357,16 @@ impl Actor { // new interval ping_interval.reset_after(next_interval()); let data = self.ping_tracker.new_ping(); - self.write_frame(RelayToClientMsg::Ping(data)).await.context(KeepAliveWriteFrameSnafu)?; + self.write_frame(RelayToClientMsg::Ping(data)) + .await + .map_err(|err| e!(RunError::KeepAliveWriteFrame, err))?; } } self.stream .flush() .await - .map_err(|_| TickFlushSnafu.build())?; + .map_err(|_| e!(RunError::TickFlush))?; } Ok(()) } @@ -457,7 +433,7 @@ impl Actor { trace!(?maybe_frame, "handle incoming frame"); let frame = match maybe_frame { Some(frame) => frame?, - None => return Err(StreamTerminatedSnafu.build()), + None => return Err(e!(HandleFrameError::StreamTerminated)), }; match frame { @@ -516,22 +492,11 @@ pub(crate) enum SendError { Closed, } -#[derive(Debug, Snafu)] -#[snafu(display("failed to forward {scope:?} packet: {reason:?}"))] -pub(crate) struct ForwardPacketError { +#[stack_error(derive, add_meta)] +#[error("failed to forward {scope:?} packet: {reason:?}")] +pub struct ForwardPacketError { scope: PacketScope, reason: SendError, - backtrace: Option, -} - -impl ForwardPacketError { - pub(crate) fn new(scope: PacketScope, reason: SendError) -> Self { - Self { - scope, - reason, - backtrace: GenerateImplicitData::generate(), - } - } } /// Tracks how many unique endpoints have been seen during the last day. 
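The relay-server hunks above all follow the same mechanical translation, so a condensed sketch may help when reading the rest of the diff. It uses only the n0-error items that already appear in this change set (stack_error, #[error], #[error(std_err)], e!); the enum and its variants below are invented for illustration, and the attribute semantics are assumed from the usage in this diff rather than from the crate documentation.

use n0_error::{e, stack_error};

// Post-migration shape of an error enum (names are illustrative).
// Previously: #[derive(Debug, Snafu)] plus a #[common_fields] block.
#[stack_error(derive, add_meta)]
#[non_exhaustive]
pub enum ExampleError {
    // Was `#[snafu(display("..."))]`, constructed with `FooSnafu.build()`.
    #[error("task cancelled")]
    Cancelled {},
    // Wraps a std error; was attached with `.context(IoSnafu)`.
    #[error("I/O failed")]
    Io {
        #[error(std_err)]
        source: std::io::Error,
    },
}

fn fail() -> ExampleError {
    // `FooSnafu.build()` becomes `e!(Enum::Variant)`.
    e!(ExampleError::Cancelled)
}

fn attach(res: Result<(), std::io::Error>) -> Result<(), ExampleError> {
    // `.context(FooSnafu)` becomes `.map_err(|err| e!(Enum::Variant, err))`.
    res.map_err(|err| e!(ExampleError::Io, err))
}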
@@ -569,8 +534,8 @@ impl ClientCounter { #[cfg(test)] mod tests { use iroh_base::SecretKey; + use n0_error::{Result, StdResultExt, bail_any}; use n0_future::Stream; - use n0_snafu::{Result, ResultExt}; use rand::SeedableRng; use tracing::info; use tracing_test::traced_test; @@ -579,7 +544,7 @@ mod tests { use crate::{client::conn::Conn, protos::common::FrameType}; async fn recv_frame< - E: snafu::Error + Sync + Send + 'static, + E: std::error::Error + Sync + Send + 'static, S: Stream> + Unpin, >( frame_type: FrameType, @@ -588,7 +553,7 @@ mod tests { match stream.next().await { Some(Ok(frame)) => { if frame_type != frame.typ() { - snafu::whatever!( + bail_any!( "Unexpected frame, got {:?}, but expected {:?}", frame.typ(), frame_type @@ -596,8 +561,8 @@ mod tests { } Ok(frame) } - Some(Err(err)) => Err(err).e(), - None => snafu::whatever!("Unexpected EOF, expected frame {frame_type:?}"), + Some(Err(err)) => Err(err).anyerr(), + None => bail_any!("Unexpected EOF, expected frame {frame_type:?}"), } } @@ -645,10 +610,13 @@ mod tests { src: endpoint_id, data: Datagrams::from(&data[..]), }; - send_queue_s.send(packet.clone()).await.context("send")?; + send_queue_s + .send(packet.clone()) + .await + .std_context("send")?; let frame = recv_frame(FrameType::RelayToClientDatagram, &mut io_rw) .await - .e()?; + .anyerr()?; assert_eq!( frame, RelayToClientMsg::Datagrams { @@ -662,10 +630,10 @@ mod tests { disco_send_queue_s .send(packet.clone()) .await - .context("send")?; + .std_context("send")?; let frame = recv_frame(FrameType::RelayToClientDatagram, &mut io_rw) .await - .e()?; + .anyerr()?; assert_eq!( frame, RelayToClientMsg::Datagrams { @@ -676,8 +644,10 @@ mod tests { // send peer_gone println!("send peer gone"); - peer_gone_s.send(endpoint_id).await.context("send")?; - let frame = recv_frame(FrameType::EndpointGone, &mut io_rw).await.e()?; + peer_gone_s.send(endpoint_id).await.std_context("send")?; + let frame = recv_frame(FrameType::EndpointGone, &mut io_rw) + .await + .anyerr()?; assert_eq!(frame, RelayToClientMsg::EndpointGone(endpoint_id)); // Read tests @@ -703,7 +673,7 @@ mod tests { datagrams: Datagrams::from(data), }) .await - .context("send")?; + .std_context("send")?; // send disco packet println!(" send disco packet"); @@ -717,10 +687,10 @@ mod tests { datagrams: disco_data.clone().into(), }) .await - .context("send")?; + .std_context("send")?; done.cancel(); - handle.await.context("join")?; + handle.await.std_context("join")?; Ok(()) } @@ -750,8 +720,8 @@ mod tests { // Send a frame, it should arrive. info!("-- send packet"); - frame_writer.send(frame.clone()).await.context("send")?; - frame_writer.flush().await.context("flush")?; + frame_writer.send(frame.clone()).await.std_context("send")?; + frame_writer.flush().await.std_context("flush")?; let recv_frame = tokio::time::timeout(Duration::from_millis(500), stream.next()) .await .expect("timeout") @@ -761,8 +731,8 @@ mod tests { // Next frame does not arrive. 
info!("-- send packet"); - frame_writer.send(frame.clone()).await.context("send")?; - frame_writer.flush().await.context("flush")?; + frame_writer.send(frame.clone()).await.std_context("send")?; + frame_writer.flush().await.std_context("flush")?; let res = tokio::time::timeout(Duration::from_millis(100), stream.next()).await; assert!(res.is_err(), "expecting a timeout"); info!("-- timeout happened"); diff --git a/iroh-relay/src/server/clients.rs b/iroh-relay/src/server/clients.rs index fe92439db1d..7fe77784e00 100644 --- a/iroh-relay/src/server/clients.rs +++ b/iroh-relay/src/server/clients.rs @@ -194,8 +194,8 @@ mod tests { use std::time::Duration; use iroh_base::SecretKey; + use n0_error::{Result, StdResultExt}; use n0_future::{Stream, StreamExt}; - use n0_snafu::{Result, ResultExt}; use rand::SeedableRng; use super::*; @@ -206,7 +206,7 @@ mod tests { }; async fn recv_frame< - E: snafu::Error + Sync + Send + 'static, + E: std::error::Error + Sync + Send + 'static, S: Stream> + Unpin, >( frame_type: FrameType, @@ -215,7 +215,7 @@ mod tests { match stream.next().await { Some(Ok(frame)) => { if frame_type != frame.typ() { - snafu::whatever!( + n0_error::bail_any!( "Unexpected frame, got {:?}, but expected {:?}", frame.typ(), frame_type @@ -223,8 +223,8 @@ mod tests { } Ok(frame) } - Some(Err(err)) => Err(err).e(), - None => snafu::whatever!("Unexpected EOF, expected frame {frame_type:?}"), + Some(Err(err)) => Err(err).anyerr(), + None => n0_error::bail_any!("Unexpected EOF, expected frame {frame_type:?}"), } } @@ -293,7 +293,7 @@ mod tests { } }) .await - .context("timeout")?; + .std_context("timeout")?; clients.shutdown().await; Ok(()) diff --git a/iroh-relay/src/server/http_server.rs b/iroh-relay/src/server/http_server.rs index b5b315560fc..40b1d773c35 100644 --- a/iroh-relay/src/server/http_server.rs +++ b/iroh-relay/src/server/http_server.rs @@ -15,9 +15,8 @@ use hyper::{ service::Service, upgrade::Upgraded, }; +use n0_error::{e, ensure, stack_error}; use n0_future::time::Elapsed; -use nested_enum_utils::common_fields; -use snafu::{Backtrace, OptionExt, ResultExt, Snafu}; use tokio::net::{TcpListener, TcpStream}; use tokio_rustls_acme::AcmeAcceptor; use tokio_util::{sync::CancellationToken, task::AbortOnDropHandle}; @@ -37,7 +36,7 @@ use crate::{ streams::WsBytesFramed, }, server::{ - BindTcpListenerSnafu, ClientRateLimit, NoLocalAddrSnafu, + ClientRateLimit, client::Config, metrics::Metrics, streams::{MaybeTlsStream, RateLimited, RelayedStream}, @@ -77,7 +76,7 @@ fn body_full(content: impl Into) -> BytesBody { fn downcast_upgrade(upgraded: Upgraded) -> Result<(MaybeTlsStream, Bytes), ConnectionHandlerError> { match upgraded.downcast::>() { Ok(parts) => Ok((parts.io.into_inner(), parts.read_buf)), - Err(_) => Err(DowncastUpgradeSnafu.build()), + Err(_) => Err(e!(ConnectionHandlerError::DowncastUpgrade)), } } @@ -151,92 +150,69 @@ pub(super) struct TlsConfig { } /// Errors when attempting to upgrade and -#[common_fields({ - backtrace: Option, -})] #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] #[non_exhaustive] pub enum ServeConnectionError { - #[snafu(display("TLS[acme] handshake"))] + #[error("TLS[acme] handshake")] TlsHandshake { + #[error(std_err)] source: std::io::Error, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, }, - #[snafu(display("TLS[acme] serve connection"))] + #[error("TLS[acme] serve connection")] ServeConnection { + #[error(std_err)] source: hyper::Error, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, }, - 
#[snafu(display("TLS[manual] timeout"))] + #[error("TLS[manual] timeout")] Timeout { + #[error(std_err)] source: Elapsed, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, }, - #[snafu(display("TLS[manual] accept"))] + #[error("TLS[manual] accept")] ManualAccept { + #[error(std_err)] source: std::io::Error, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, }, - #[snafu(display("TLS[acme] accept"))] + #[error("TLS[acme] accept")] LetsEncryptAccept { + #[error(std_err)] source: std::io::Error, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, }, - #[snafu(display("HTTPS connection"))] + #[error("HTTPS connection")] Https { + #[error(std_err)] source: hyper::Error, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, }, - #[snafu(display("HTTP connection"))] + #[error("HTTP connection")] Http { + #[error(std_err)] source: hyper::Error, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, }, } /// Server accept errors. -#[common_fields({ - backtrace: Option, -})] #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources)] #[non_exhaustive] pub enum AcceptError { - #[snafu(transparent)] + #[error(transparent)] Handshake { source: handshake::Error }, - #[snafu(display("rate limiting misconfigured"))] + #[error("rate limiting misconfigured")] RateLimitingMisconfigured { source: InvalidBucketConfig }, } /// Server connection errors, includes errors that can happen on `accept`. -#[common_fields({ - backtrace: Option, -})] #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources)] #[non_exhaustive] pub enum ConnectionHandlerError { - #[snafu(transparent)] + #[error(transparent)] Accept { source: AcceptError }, - #[snafu(display("Could not downcast the upgraded connection to MaybeTlsStream"))] - DowncastUpgrade { - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, - }, - #[snafu(display("Cannot deal with buffered data yet: {buf:?}"))] - BufferNotEmpty { - buf: Bytes, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, - }, + #[error("Could not downcast the upgraded connection to MaybeTlsStream")] + DowncastUpgrade {}, + #[error("Cannot deal with buffered data yet: {buf:?}")] + BufferNotEmpty { buf: Bytes }, } /// Builder for the Relay HTTP Server. @@ -339,8 +315,6 @@ impl ServerBuilder { /// Builds and spawns an HTTP(S) Relay Server. 
pub(super) async fn spawn(self) -> Result { - use snafu::ResultExt; - let cancel_token = CancellationToken::new(); let service = RelayService::new( @@ -359,9 +333,11 @@ impl ServerBuilder { let listener = TcpListener::bind(&addr) .await - .map_err(|_| BindTcpListenerSnafu { addr }.build())?; + .map_err(|err| e!(super::SpawnError::BindTcpListener { addr }, err))?; - let addr = listener.local_addr().context(NoLocalAddrSnafu)?; + let addr = listener + .local_addr() + .map_err(|err| e!(super::SpawnError::NoLocalAddr, err))?; let http_str = tls_config.as_ref().map_or("HTTP/WS", |_| "HTTPS/WSS"); info!("[{http_str}] relay: serving on {addr}"); @@ -432,22 +408,22 @@ struct Inner { metrics: Arc, } -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] enum RelayUpgradeReqError { - #[snafu(display("missing header: {header}"))] + #[error("missing header: {header}")] MissingHeader { header: http::HeaderName }, - #[snafu(display("invalid header value for {header}: {details}"))] + #[error("invalid header value for {header}: {details}")] InvalidHeader { header: http::HeaderName, details: String, }, - #[snafu(display( + #[error( "invalid header value for {SEC_WEBSOCKET_VERSION}: unsupported websocket version, only supporting {SUPPORTED_WEBSOCKET_VERSION}" - ))] + )] UnsupportedWebsocketVersion, - #[snafu(display( + #[error( "invalid header value for {SEC_WEBSOCKET_PROTOCOL}: unsupported relay version: we support {we_support} but you only provide {you_support}" - ))] + )] UnsupportedRelayVersion { we_support: &'static str, you_support: String, @@ -474,41 +450,43 @@ impl RelayService { ) -> Result<&HeaderValue, RelayUpgradeReqError> { req.headers() .get(&header) - .context(MissingHeaderSnafu { header }) + .ok_or_else(|| e!(RelayUpgradeReqError::MissingHeader { header })) } let upgrade_header = expect_header(&req, UPGRADE)?; - snafu::ensure!( + ensure!( upgrade_header == HeaderValue::from_static(WEBSOCKET_UPGRADE_PROTOCOL), - InvalidHeaderSnafu { + RelayUpgradeReqError::InvalidHeader { header: UPGRADE, - details: format!("value must be {WEBSOCKET_UPGRADE_PROTOCOL}"), + details: format!("value must be {WEBSOCKET_UPGRADE_PROTOCOL}") } ); let key = expect_header(&req, SEC_WEBSOCKET_KEY)?.clone(); let version = expect_header(&req, SEC_WEBSOCKET_VERSION)?.clone(); - snafu::ensure!( + ensure!( version.as_bytes() == SUPPORTED_WEBSOCKET_VERSION.as_bytes(), - UnsupportedWebsocketVersionSnafu + RelayUpgradeReqError::UnsupportedWebsocketVersion ); let subprotocols = expect_header(&req, SEC_WEBSOCKET_PROTOCOL)? .to_str() .ok() - .context(InvalidHeaderSnafu { - header: SEC_WEBSOCKET_PROTOCOL, - details: "header value is not ascii".to_string(), + .ok_or_else(|| { + e!(RelayUpgradeReqError::InvalidHeader { + header: SEC_WEBSOCKET_PROTOCOL, + details: "header value is not ascii".to_string() + }) })?; let supports_our_version = subprotocols .split_whitespace() .any(|p| p == RELAY_PROTOCOL_VERSION); - snafu::ensure!( + ensure!( supports_our_version, - UnsupportedRelayVersionSnafu { + RelayUpgradeReqError::UnsupportedRelayVersion { we_support: RELAY_PROTOCOL_VERSION, - you_support: subprotocols.to_string(), + you_support: subprotocols.to_string() } ); @@ -576,7 +554,7 @@ impl Service> for RelayService { let res = match self.handle_relay_ws_upgrade(req) { Ok(response) => Ok(response), // It's convention to send back the version(s) we *do* support - Err(e @ RelayUpgradeReqError::UnsupportedWebsocketVersion) => self + Err(e @ RelayUpgradeReqError::UnsupportedWebsocketVersion { .. 
}) => self .build_response() .status(StatusCode::BAD_REQUEST) .header(SEC_WEBSOCKET_VERSION, SUPPORTED_WEBSOCKET_VERSION) @@ -638,7 +616,7 @@ impl Inner { debug!("relay_connection upgraded"); let (io, read_buf) = downcast_upgrade(upgraded)?; if !read_buf.is_empty() { - return Err(BufferNotEmptySnafu { buf: read_buf }.build()); + return Err(e!(ConnectionHandlerError::BufferNotEmpty { buf: read_buf })); } self.accept(io, client_auth_header).await?; @@ -663,7 +641,7 @@ impl Inner { trace!("accept: start"); let io = RateLimited::from_cfg(self.rate_limit, io, self.metrics.clone()) - .context(RateLimitingMisconfiguredSnafu)?; + .map_err(|err| e!(AcceptError::RateLimitingMisconfigured, err))?; // Create a server builder with default config let websocket = tokio_websockets::ServerBuilder::new() @@ -755,7 +733,7 @@ impl RelayService { debug!("HTTP: serve connection"); self.serve_connection(MaybeTlsStream::Plain(stream)) .await - .context(HttpSnafu) + .map_err(|err| e!(ServeConnectionError::Http, err)) } }; match res { @@ -791,7 +769,11 @@ impl RelayService { let TlsConfig { acceptor, config } = tls_config; match acceptor { TlsAcceptor::LetsEncrypt(a) => { - match a.accept(stream).await.context(LetsEncryptAcceptSnafu)? { + match a + .accept(stream) + .await + .map_err(|err| e!(ServeConnectionError::LetsEncryptAccept, err))? + { None => { info!("TLS[acme]: received TLS-ALPN-01 validation request"); } @@ -800,10 +782,10 @@ impl RelayService { let tls_stream = start_handshake .into_stream(config) .await - .context(TlsHandshakeSnafu)?; + .map_err(|err| e!(ServeConnectionError::TlsHandshake, err))?; self.serve_connection(MaybeTlsStream::Tls(tls_stream)) .await - .context(HttpsSnafu)?; + .map_err(|err| e!(ServeConnectionError::Https, err))?; } } } @@ -811,12 +793,12 @@ impl RelayService { debug!("TLS[manual]: accept"); let tls_stream = tokio::time::timeout(Duration::from_secs(30), a.accept(stream)) .await - .context(TimeoutSnafu)? - .context(ManualAcceptSnafu)?; + .map_err(|err| e!(ServeConnectionError::Timeout, err))? 
+ .map_err(|err| e!(ServeConnectionError::ManualAccept, err))?; self.serve_connection(MaybeTlsStream::Tls(tls_stream)) .await - .context(ServeConnectionSnafu)?; + .map_err(|err| e!(ServeConnectionError::ServeConnection, err))?; } } Ok(()) @@ -866,11 +848,10 @@ mod tests { use std::sync::Arc; use iroh_base::{PublicKey, SecretKey}; + use n0_error::{Result, StdResultExt, bail_any}; use n0_future::{SinkExt, StreamExt}; - use n0_snafu::{Result, ResultExt}; use rand::SeedableRng; use reqwest::Url; - use snafu::whatever; use tracing::info; use tracing_test::traced_test; @@ -926,7 +907,7 @@ mod tests { let addr = if let std::net::IpAddr::V4(ipv4_addr) = addr.ip() { ipv4_addr } else { - whatever!("cannot get ipv4 addr from socket addr {addr:?}"); + bail_any!("cannot get ipv4 addr from socket addr {addr:?}"); }; info!("addr: {addr}:{port}"); @@ -1047,7 +1028,7 @@ mod tests { let addr = if let std::net::IpAddr::V4(ipv4_addr) = addr.ip() { ipv4_addr } else { - whatever!("cannot get ipv4 addr from socket addr {addr:?}"); + bail_any!("cannot get ipv4 addr from socket addr {addr:?}"); }; info!("Relay listening on: {addr}:{port}"); @@ -1102,7 +1083,7 @@ mod tests { client_a.close().await?; client_b.close().await?; server.shutdown(); - server.task_handle().await.context("join")?; + server.task_handle().await.std_context("join")?; Ok(()) } @@ -1138,7 +1119,7 @@ mod tests { let handler_task = tokio::spawn(async move { s.0.accept(MaybeTlsStream::Test(rw_a), None).await }); let mut client_a = make_test_client(client_a, &key_a).await?; - handler_task.await.context("join")??; + handler_task.await.std_context("join")??; info!("Create client B and connect it to the server."); let key_b = SecretKey::generate(&mut rng); @@ -1148,7 +1129,7 @@ mod tests { let handler_task = tokio::spawn(async move { s.0.accept(MaybeTlsStream::Test(rw_b), None).await }); let mut client_b = make_test_client(client_b, &key_b).await?; - handler_task.await.context("join")??; + handler_task.await.std_context("join")??; info!("Send message from A to B."); let msg = Datagrams::from(b"hello client b!!"); @@ -1167,7 +1148,7 @@ mod tests { assert_eq!(msg, datagrams); } msg => { - whatever!("expected ReceivedDatagrams msg, got {msg:?}"); + bail_any!("expected ReceivedDatagrams msg, got {msg:?}"); } } @@ -1188,7 +1169,7 @@ mod tests { assert_eq!(msg, datagrams); } msg => { - whatever!("expected ReceivedDatagrams msg, got {msg:?}"); + bail_any!("expected ReceivedDatagrams msg, got {msg:?}"); } } @@ -1238,7 +1219,7 @@ mod tests { let handler_task = tokio::spawn(async move { s.0.accept(MaybeTlsStream::Test(rw_a), None).await }); let mut client_a = make_test_client(client_a, &key_a).await?; - handler_task.await.context("join")??; + handler_task.await.std_context("join")??; info!("Create client B and connect it to the server."); let key_b = SecretKey::generate(&mut rng); @@ -1248,7 +1229,7 @@ mod tests { let handler_task = tokio::spawn(async move { s.0.accept(MaybeTlsStream::Test(rw_b), None).await }); let mut client_b = make_test_client(client_b, &key_b).await?; - handler_task.await.context("join")??; + handler_task.await.std_context("join")??; info!("Send message from A to B."); let msg = Datagrams::from(b"hello client b!!"); @@ -1267,7 +1248,7 @@ mod tests { assert_eq!(msg, datagrams); } msg => { - whatever!("expected ReceivedDatagrams msg, got {msg:?}"); + bail_any!("expected ReceivedDatagrams msg, got {msg:?}"); } } @@ -1288,7 +1269,7 @@ mod tests { assert_eq!(msg, datagrams); } msg => { - whatever!("expected ReceivedDatagrams msg, got {msg:?}"); 
+ bail_any!("expected ReceivedDatagrams msg, got {msg:?}"); } } @@ -1298,7 +1279,7 @@ mod tests { let handler_task = tokio::spawn(async move { s.0.accept(MaybeTlsStream::Test(new_rw_b), None).await }); let mut new_client_b = make_test_client(new_client_b, &key_b).await?; - handler_task.await.context("join")??; + handler_task.await.std_context("join")??; // assert!(client_b.recv().await.is_err()); @@ -1319,7 +1300,7 @@ mod tests { assert_eq!(msg, datagrams); } msg => { - whatever!("expected ReceivedDatagrams msg, got {msg:?}"); + bail_any!("expected ReceivedDatagrams msg, got {msg:?}"); } } @@ -1340,7 +1321,7 @@ mod tests { assert_eq!(msg, datagrams); } msg => { - whatever!("expected ReceivedDatagrams msg, got {msg:?}"); + bail_any!("expected ReceivedDatagrams msg, got {msg:?}"); } } diff --git a/iroh-relay/src/server/streams.rs b/iroh-relay/src/server/streams.rs index 545e61bd494..168f6ae0b80 100644 --- a/iroh-relay/src/server/streams.rs +++ b/iroh-relay/src/server/streams.rs @@ -6,8 +6,8 @@ use std::{ task::{Context, Poll}, }; +use n0_error::{ensure, stack_error}; use n0_future::{FutureExt, Sink, Stream, ready, time}; -use snafu::{Backtrace, Snafu}; use tokio::io::{AsyncRead, AsyncWrite}; use tracing::instrument; @@ -75,14 +75,17 @@ impl RelayedStream { } /// Relay send errors -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] #[non_exhaustive] pub enum SendError { - #[snafu(transparent)] - StreamError { source: StreamError }, - #[snafu(display("Packet exceeds max packet size"))] + #[error(transparent)] + StreamError { + #[error(from, std_err)] + source: StreamError, + }, + #[error("Packet exceeds max packet size")] ExceedsMaxPacketSize { size: usize }, - #[snafu(display("Attempted to send empty packet"))] + #[error("Attempted to send empty packet")] EmptyPacket {}, } @@ -95,9 +98,12 @@ impl Sink for RelayedStream { fn start_send(mut self: Pin<&mut Self>, item: RelayToClientMsg) -> Result<(), Self::Error> { let size = item.encoded_len(); - snafu::ensure!(size <= MAX_PACKET_SIZE, ExceedsMaxPacketSizeSnafu { size }); + ensure!( + size <= MAX_PACKET_SIZE, + SendError::ExceedsMaxPacketSize { size } + ); if let RelayToClientMsg::Datagrams { datagrams, .. 
} = &item { - snafu::ensure!(!datagrams.contents.is_empty(), EmptyPacketSnafu); + ensure!(!datagrams.contents.is_empty(), SendError::EmptyPacket); } Pin::new(&mut self.inner) @@ -115,13 +121,16 @@ impl Sink for RelayedStream { } /// Relay receive errors -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources)] #[non_exhaustive] pub enum RecvError { - #[snafu(transparent)] + #[error(transparent)] Proto { source: ProtoError }, - #[snafu(transparent)] - StreamError { source: StreamError }, + #[error(transparent)] + StreamError { + #[error(std_err)] + source: StreamError, + }, } impl Stream for RelayedStream { @@ -278,11 +287,8 @@ struct Bucket { } #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] pub struct InvalidBucketConfig { - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, max: i64, bytes_per_second: i64, refill_period: time::Duration, @@ -296,13 +302,13 @@ impl Bucket { ) -> Result { // milliseconds is the tokio timer resolution let refill = bytes_per_second.saturating_mul(refill_period.as_millis() as i64) / 1000; - snafu::ensure!( + ensure!( max > 0 && bytes_per_second > 0 && refill_period.as_millis() as u32 > 0 && refill > 0, - InvalidBucketConfigSnafu { + InvalidBucketConfig { max, bytes_per_second, - refill_period, - }, + refill_period + } ); Ok(Self { fill: max, @@ -480,8 +486,8 @@ impl AsyncWrite for RateLimited { mod tests { use std::sync::Arc; + use n0_error::{Result, StdResultExt}; use n0_future::time; - use n0_snafu::{Result, ResultExt}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tracing_test::traced_test; @@ -521,7 +527,7 @@ mod tests { }, ) .await - .e()?; + .anyerr()?; let duration = time::Instant::now().duration_since(before); assert_ne!(duration.as_millis(), 0); diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml index 35137b51028..16932bc647a 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -39,10 +39,9 @@ http = "1" iroh-base = { version = "0.94.1", default-features = false, features = ["key", "relay"], path = "../iroh-base" } iroh-relay = { version = "0.94", path = "../iroh-relay", default-features = false } n0-future = "0.3.0" -n0-snafu = "0.2.2" -n0-watcher = "0.4" -nested_enum_utils = "0.2.1" -netwatch = { version = "0.11" } +n0-error = "0.1.0" +n0-watcher = "0.5" +netwatch = { version = "0.12" } pin-project = "1" pkarr = { version = "5", default-features = false, features = ["relays"] } quinn = { package = "iroh-quinn", version = "0.14.0", default-features = false, features = ["rustls-ring"] } @@ -56,7 +55,6 @@ reqwest = { version = "0.12", default-features = false, features = [ rustls = { version = "0.23.33", default-features = false, features = ["ring"] } serde = { version = "1.0.219", features = ["derive", "rc"] } smallvec = "1.11.1" -snafu = { version = "0.8.5", features = ["rust_1_81"] } strum = { version = "0.27", features = ["derive"] } tokio = { version = "1.44.1", features = [ "io-util", @@ -76,7 +74,7 @@ pkcs8 = "0.11.0-rc.7" rustls-platform-verifier = "0.5.3" # metrics -iroh-metrics = { version = "0.36", default-features = false } +iroh-metrics = { version = "0.37", default-features = false } # local-swarm-discovery swarm-discovery = { version = "0.4", optional = true } @@ -90,7 +88,7 @@ axum = { version = "0.8", optional = true } hickory-resolver = "0.25.1" igd-next = { version = "0.16", features = ["aio_tokio"] } netdev = { version = "0.38.1" } -portmapper = { version = "0.11", default-features = false } +portmapper = { version = "0.12", default-features = false } quinn = 
{ package = "iroh-quinn", version = "0.14.0", default-features = false, features = ["runtime-tokio", "rustls-ring"] } tokio = { version = "1", features = [ "io-util", diff --git a/iroh/bench/Cargo.toml b/iroh/bench/Cargo.toml index 8fd7bfb9fe3..5ae9fcb08f3 100644 --- a/iroh/bench/Cargo.toml +++ b/iroh/bench/Cargo.toml @@ -9,9 +9,9 @@ publish = false bytes = "1.7" hdrhistogram = { version = "7.2", default-features = false } iroh = { path = ".." } -iroh-metrics = "0.36" +iroh-metrics = "0.37" n0-future = "0.3.0" -n0-snafu = "0.2.0" +n0-error = "0.1.0" quinn = { package = "iroh-quinn", version = "0.14" } rand = "0.9.2" rcgen = "0.14" diff --git a/iroh/bench/src/bin/bulk.rs b/iroh/bench/src/bin/bulk.rs index b666a1a20fd..b38d87e02f3 100644 --- a/iroh/bench/src/bin/bulk.rs +++ b/iroh/bench/src/bin/bulk.rs @@ -5,7 +5,7 @@ use clap::Parser; use iroh_bench::quinn; use iroh_bench::{Commands, Opt, configure_tracing_subscriber, iroh, rt, s2n}; use iroh_metrics::{MetricValue, MetricsGroup}; -use n0_snafu::Result; +use n0_error::Result; fn main() { let cmd = Commands::parse(); diff --git a/iroh/bench/src/iroh.rs b/iroh/bench/src/iroh.rs index b3f0eb84155..f8dd6bd1878 100644 --- a/iroh/bench/src/iroh.rs +++ b/iroh/bench/src/iroh.rs @@ -8,7 +8,7 @@ use iroh::{ Endpoint, EndpointAddr, RelayMode, RelayUrl, endpoint::{Connection, ConnectionError, RecvStream, SendStream, TransportConfig}, }; -use n0_snafu::{Result, ResultExt}; +use n0_error::{Result, StackResultExt, StdResultExt}; use tracing::{trace, warn}; use crate::{ @@ -154,7 +154,7 @@ async fn drain_stream( let mut num_chunks: u64 = 0; if read_unordered { - while let Some(chunk) = stream.read_chunk(usize::MAX, false).await.e()? { + while let Some(chunk) = stream.read_chunk(usize::MAX, false).await.anyerr()? { if first_byte { ttfb = download_start.elapsed(); first_byte = false; @@ -176,7 +176,7 @@ async fn drain_stream( Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), ]; - while let Some(n) = stream.read_chunks(&mut bufs[..]).await.e()? { + while let Some(n) = stream.read_chunks(&mut bufs[..]).await.anyerr()? 
{ if first_byte { ttfb = download_start.elapsed(); first_byte = false; @@ -200,21 +200,21 @@ async fn send_data_on_stream(stream: &mut SendStream, stream_size: u64) -> Resul stream .write_chunk(bytes_data.clone()) .await - .context("failed sending data")?; + .std_context("failed sending data")?; } if remaining != 0 { stream .write_chunk(bytes_data.slice(0..remaining)) .await - .context("failed sending data")?; + .std_context("failed sending data")?; } - stream.finish().context("failed finishing stream")?; + stream.finish().std_context("failed finishing stream")?; stream .stopped() .await - .context("failed to wait for stream to be stopped")?; + .std_context("failed to wait for stream to be stopped")?; Ok(()) } @@ -229,7 +229,7 @@ pub async fn handle_client_stream( let (mut send_stream, mut recv_stream) = connection .open_bi() .await - .context("failed to open stream")?; + .std_context("failed to open stream")?; send_data_on_stream(&mut send_stream, upload_size).await?; @@ -258,7 +258,7 @@ pub async fn server(endpoint: Endpoint, opt: Opt) -> Result<()> { continue; } }; - let connection = connecting.await.context("handshake failed")?; + let connection = connecting.await.std_context("handshake failed")?; server_tasks.push(tokio::spawn(async move { loop { @@ -275,7 +275,7 @@ pub async fn server(endpoint: Endpoint, opt: Opt) -> Result<()> { tokio::spawn(async move { drain_stream(&mut recv_stream, opt.read_unordered).await?; send_data_on_stream(&mut send_stream, opt.download_size).await?; - Ok::<_, n0_snafu::Error>(()) + n0_error::Ok(()) }); } diff --git a/iroh/bench/src/lib.rs b/iroh/bench/src/lib.rs index 4921793db69..0e6669b38a6 100644 --- a/iroh/bench/src/lib.rs +++ b/iroh/bench/src/lib.rs @@ -6,7 +6,7 @@ use std::{ }; use clap::Parser; -use n0_snafu::Result; +use n0_error::Result; use stats::Stats; use tokio::{ runtime::{Builder, Runtime}, diff --git a/iroh/bench/src/quinn.rs b/iroh/bench/src/quinn.rs index e9ca57c33ad..fcb340ce702 100644 --- a/iroh/bench/src/quinn.rs +++ b/iroh/bench/src/quinn.rs @@ -5,7 +5,7 @@ use std::{ }; use bytes::Bytes; -use n0_snafu::{Result, ResultExt}; +use n0_error::{Result, StdResultExt}; use quinn::{ Connection, Endpoint, RecvStream, SendStream, TransportConfig, crypto::rustls::QuicClientConfig, }; @@ -73,7 +73,7 @@ pub async fn connect_client( quinn::Endpoint::client(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)).unwrap(); let mut roots = RootCertStore::empty(); - roots.add(server_cert).e()?; + roots.add(server_cert).anyerr()?; let provider = rustls::crypto::ring::default_provider(); @@ -84,14 +84,14 @@ pub async fn connect_client( .with_no_client_auth(); let mut client_config = - quinn::ClientConfig::new(Arc::new(QuicClientConfig::try_from(crypto).e()?)); + quinn::ClientConfig::new(Arc::new(QuicClientConfig::try_from(crypto).anyerr()?)); client_config.transport_config(Arc::new(transport_config(opt.max_streams, opt.initial_mtu))); let connection = endpoint .connect_with(client_config, server_addr, "localhost") .unwrap() .await - .context("unable to connect")?; + .std_context("unable to connect")?; trace!("connected"); Ok((endpoint, connection)) @@ -125,7 +125,7 @@ async fn drain_stream( let mut num_chunks: u64 = 0; if read_unordered { - while let Some(chunk) = stream.read_chunk(usize::MAX, false).await.e()? { + while let Some(chunk) = stream.read_chunk(usize::MAX, false).await.anyerr()? 
{ if first_byte { ttfb = download_start.elapsed(); first_byte = false; @@ -147,7 +147,7 @@ async fn drain_stream( Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), ]; - while let Some(n) = stream.read_chunks(&mut bufs[..]).await.e()? { + while let Some(n) = stream.read_chunks(&mut bufs[..]).await.anyerr()? { if first_byte { ttfb = download_start.elapsed(); first_byte = false; @@ -171,21 +171,21 @@ async fn send_data_on_stream(stream: &mut SendStream, stream_size: u64) -> Resul stream .write_chunk(bytes_data.clone()) .await - .context("failed sending data")?; + .std_context("failed sending data")?; } if remaining != 0 { stream .write_chunk(bytes_data.slice(0..remaining)) .await - .context("failed sending data")?; + .std_context("failed sending data")?; } - stream.finish().context("failed finishing stream")?; + stream.finish().std_context("failed finishing stream")?; stream .stopped() .await - .context("failed to wait for stream to be stopped")?; + .std_context("failed to wait for stream to be stopped")?; Ok(()) } @@ -200,7 +200,7 @@ pub async fn handle_client_stream( let (mut send_stream, mut recv_stream) = connection .open_bi() .await - .context("failed to open stream")?; + .std_context("failed to open stream")?; send_data_on_stream(&mut send_stream, upload_size).await?; @@ -229,7 +229,7 @@ pub async fn server(endpoint: Endpoint, opt: Opt) -> Result<()> { continue; } }; - let connection = connecting.await.context("handshake failed")?; + let connection = connecting.await.std_context("handshake failed")?; server_tasks.push(tokio::spawn(async move { loop { @@ -246,7 +246,7 @@ pub async fn server(endpoint: Endpoint, opt: Opt) -> Result<()> { tokio::spawn(async move { drain_stream(&mut recv_stream, opt.read_unordered).await?; send_data_on_stream(&mut send_stream, opt.download_size).await?; - Ok::<_, n0_snafu::Error>(()) + n0_error::Ok(()) }); } diff --git a/iroh/examples/0rtt.rs b/iroh/examples/0rtt.rs index 99069eb28ba..907d18b5d0a 100644 --- a/iroh/examples/0rtt.rs +++ b/iroh/examples/0rtt.rs @@ -6,8 +6,8 @@ use iroh::{ EndpointId, SecretKey, endpoint::{Connecting, Connection}, }; +use n0_error::{Result, StackResultExt, StdResultExt, bail_any}; use n0_future::{StreamExt, future}; -use n0_snafu::ResultExt; use n0_watcher::Watcher; use tracing::{info, trace}; @@ -27,7 +27,7 @@ struct Args { /// Gets a secret key from the IROH_SECRET environment variable or generates a new random one. /// If the environment variable is set, it must be a valid string representation of a secret key. -pub fn get_or_generate_secret_key() -> n0_snafu::Result { +pub fn get_or_generate_secret_key() -> Result { if let Ok(secret) = env::var("IROH_SECRET") { // Parse the secret key from string SecretKey::from_str(&secret).context("Invalid secret key format") @@ -52,28 +52,28 @@ async fn pingpong( connection: &Connection, proceed: impl Future, x: u64, -) -> n0_snafu::Result<()> { - let (mut send, recv) = connection.open_bi().await.e()?; +) -> Result<()> { + let (mut send, recv) = connection.open_bi().await.anyerr()?; let data = x.to_be_bytes(); - send.write_all(&data).await.e()?; - send.finish().e()?; + send.write_all(&data).await.anyerr()?; + send.finish().anyerr()?; let mut recv = if proceed.await { // use recv directly if we can proceed recv } else { // proceed returned false, so we have learned that the 0-RTT send was rejected. // at this point we have a fully handshaked connection, so we try again. 
- let (mut send, recv) = connection.open_bi().await.e()?; - send.write_all(&data).await.e()?; - send.finish().e()?; + let (mut send, recv) = connection.open_bi().await.anyerr()?; + send.write_all(&data).await.anyerr()?; + send.finish().anyerr()?; recv }; - let echo = recv.read_to_end(8).await.e()?; + let echo = recv.read_to_end(8).await.anyerr()?; assert!(echo == data); Ok(()) } -async fn pingpong_0rtt(connecting: Connecting, i: u64) -> n0_snafu::Result { +async fn pingpong_0rtt(connecting: Connecting, i: u64) -> Result { let connection = match connecting.into_0rtt() { Ok((connection, accepted)) => { trace!("0-RTT possible from our side"); @@ -82,7 +82,7 @@ async fn pingpong_0rtt(connecting: Connecting, i: u64) -> n0_snafu::Result { trace!("0-RTT not possible from our side"); - let connection = connecting.await.e()?; + let connection = connecting.await.anyerr()?; pingpong(&connection, future::ready(true), i).await?; connection } @@ -90,8 +90,8 @@ async fn pingpong_0rtt(connecting: Connecting, i: u64) -> n0_snafu::Result n0_snafu::Result<()> { - let remote_id = args.endpoint_id.unwrap(); +async fn connect(args: Args) -> Result<()> { + let remote_id = args.endpoint_id.context("Missing endpoint id")?; let endpoint = iroh::Endpoint::builder() .relay_mode(iroh::RelayMode::Disabled) .keylog(true) @@ -104,7 +104,7 @@ async fn connect(args: Args) -> n0_snafu::Result<()> { .connect_with_opts(remote_id, PINGPONG_ALPN, Default::default()) .await?; let connection = if args.disable_0rtt { - let connection = connecting.await.e()?; + let connection = connecting.await.anyerr()?; trace!("connecting without 0-RTT"); pingpong(&connection, future::ready(true), i).await?; connection @@ -130,7 +130,7 @@ async fn connect(args: Args) -> n0_snafu::Result<()> { Ok(()) } -async fn accept(_args: Args) -> n0_snafu::Result<()> { +async fn accept(_args: Args) -> Result<()> { let secret_key = get_or_generate_secret_key()?; let endpoint = iroh::Endpoint::builder() .alpns(vec![PINGPONG_ALPN.to_vec()]) @@ -141,7 +141,7 @@ async fn accept(_args: Args) -> n0_snafu::Result<()> { let mut addrs = endpoint.watch_addr().stream(); let addr = loop { let Some(addr) = addrs.next().await else { - snafu::whatever!("Address stream closed"); + bail_any!("Address stream closed"); }; if !addr.ip_addrs().count() == 0 { break addr; @@ -152,18 +152,18 @@ async fn accept(_args: Args) -> n0_snafu::Result<()> { let accept = async move { while let Some(incoming) = endpoint.accept().await { tokio::spawn(async move { - let connecting = incoming.accept().e()?; + let connecting = incoming.accept().anyerr()?; let (connection, _zero_rtt_accepted) = connecting .into_0rtt() .expect("accept into 0.5 RTT always succeeds"); - let (mut send, mut recv) = connection.accept_bi().await.e()?; + let (mut send, mut recv) = connection.accept_bi().await.anyerr()?; trace!("recv.is_0rtt: {}", recv.is_0rtt()); - let data = recv.read_to_end(8).await.e()?; + let data = recv.read_to_end(8).await.anyerr()?; trace!("recv: {}", data.len()); - send.write_all(&data).await.e()?; - send.finish().e()?; + send.write_all(&data).await.anyerr()?; + send.finish().anyerr()?; connection.closed().await; - Ok::<_, n0_snafu::Error>(()) + n0_error::Ok(()) }); } }; @@ -179,7 +179,7 @@ async fn accept(_args: Args) -> n0_snafu::Result<()> { } #[tokio::main] -async fn main() -> n0_snafu::Result<()> { +async fn main() -> Result<()> { tracing_subscriber::fmt::init(); let args = Args::parse(); if args.endpoint_id.is_some() { diff --git a/iroh/examples/connect-unreliable.rs 
b/iroh/examples/connect-unreliable.rs index 991c40d8110..46a19bd8dc4 100644 --- a/iroh/examples/connect-unreliable.rs +++ b/iroh/examples/connect-unreliable.rs @@ -10,7 +10,7 @@ use std::net::SocketAddr; use clap::Parser; use iroh::{Endpoint, EndpointAddr, RelayMode, RelayUrl, SecretKey}; use iroh_base::TransportAddr; -use n0_snafu::ResultExt; +use n0_error::{Result, StdResultExt}; use tracing::info; // An example ALPN that we are using to communicate over the `Endpoint` @@ -30,7 +30,7 @@ struct Cli { } #[tokio::main] -async fn main() -> n0_snafu::Result<()> { +async fn main() -> Result<()> { tracing_subscriber::fmt::init(); println!("\nconnect (unreliable) example!\n"); let args = Cli::parse(); @@ -83,11 +83,12 @@ async fn main() -> n0_snafu::Result<()> { // Send a datagram over the connection. let message = format!("{me} is saying 'hello!'"); - conn.send_datagram(message.as_bytes().to_vec().into()).e()?; + conn.send_datagram(message.as_bytes().to_vec().into()) + .anyerr()?; // Read a datagram over the connection. - let message = conn.read_datagram().await.e()?; - let message = String::from_utf8(message.into()).e()?; + let message = conn.read_datagram().await.anyerr()?; + let message = String::from_utf8(message.into()).anyerr()?; println!("received: {message}"); Ok(()) diff --git a/iroh/examples/connect.rs b/iroh/examples/connect.rs index 7c2da47a8f7..6ac6482519f 100644 --- a/iroh/examples/connect.rs +++ b/iroh/examples/connect.rs @@ -9,7 +9,7 @@ use std::net::SocketAddr; use clap::Parser; use iroh::{Endpoint, EndpointAddr, RelayMode, RelayUrl, SecretKey, TransportAddr}; -use n0_snafu::{Result, ResultExt}; +use n0_error::{Result, StdResultExt}; use tracing::info; // An example ALPN that we are using to communicate over the `Endpoint` @@ -81,15 +81,15 @@ async fn main() -> Result<()> { info!("connected"); // Use the Quinn API to send and recv content. - let (mut send, mut recv) = conn.open_bi().await.e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; let message = format!("{me} is saying 'hello!'"); - send.write_all(message.as_bytes()).await.e()?; + send.write_all(message.as_bytes()).await.anyerr()?; // Call `finish` to close the send side of the connection gracefully. - send.finish().e()?; - let message = recv.read_to_end(100).await.e()?; - let message = String::from_utf8(message).e()?; + send.finish().anyerr()?; + let message = recv.read_to_end(100).await.anyerr()?; + let message = String::from_utf8(message).anyerr()?; println!("received: {message}"); // We received the last message: close all connections and allow for the close diff --git a/iroh/examples/dht_discovery.rs b/iroh/examples/dht_discovery.rs index 8dfc1969b20..5fc0ec8af45 100644 --- a/iroh/examples/dht_discovery.rs +++ b/iroh/examples/dht_discovery.rs @@ -12,7 +12,7 @@ use std::str::FromStr; use clap::Parser; use iroh::{Endpoint, EndpointId}; -use n0_snafu::ResultExt; +use n0_error::{Result, StdResultExt}; use tracing::warn; use url::Url; @@ -61,7 +61,7 @@ fn build_discovery(args: Args) -> iroh::discovery::pkarr::dht::Builder { } } -async fn chat_server(args: Args) -> n0_snafu::Result<()> { +async fn chat_server(args: Args) -> Result<()> { let secret_key = iroh::SecretKey::generate(&mut rand::rng()); let endpoint_id = secret_key.public(); let discovery = build_discovery(args); @@ -72,7 +72,7 @@ async fn chat_server(args: Args) -> n0_snafu::Result<()> { .bind() .await?; let zid = pkarr::PublicKey::try_from(endpoint_id.as_bytes()) - .e()? + .anyerr()? 
.to_z32(); println!("Listening on {endpoint_id}"); println!("pkarr z32: {zid}"); @@ -88,11 +88,11 @@ async fn chat_server(args: Args) -> n0_snafu::Result<()> { } }; tokio::spawn(async move { - let connection = connecting.await.e()?; + let connection = connecting.await.anyerr()?; let remote_endpoint_id = connection.remote_id()?; println!("got connection from {remote_endpoint_id}"); // just leave the tasks hanging. this is just an example. - let (mut writer, mut reader) = connection.accept_bi().await.e()?; + let (mut writer, mut reader) = connection.accept_bi().await.anyerr()?; let _copy_to_stdout = tokio::spawn(async move { tokio::io::copy(&mut reader, &mut tokio::io::stdout()).await }); @@ -100,13 +100,13 @@ async fn chat_server(args: Args) -> n0_snafu::Result<()> { tokio::spawn( async move { tokio::io::copy(&mut tokio::io::stdin(), &mut writer).await }, ); - Ok::<_, n0_snafu::Error>(()) + n0_error::Ok(()) }); } Ok(()) } -async fn chat_client(args: Args) -> n0_snafu::Result<()> { +async fn chat_client(args: Args) -> Result<()> { let remote_endpoint_id = args.endpoint_id.unwrap(); let secret_key = iroh::SecretKey::generate(&mut rand::rng()); let endpoint_id = secret_key.public(); @@ -121,18 +121,18 @@ async fn chat_client(args: Args) -> n0_snafu::Result<()> { println!("We are {endpoint_id} and connecting to {remote_endpoint_id}"); let connection = endpoint.connect(remote_endpoint_id, CHAT_ALPN).await?; println!("connected to {remote_endpoint_id}"); - let (mut writer, mut reader) = connection.open_bi().await.e()?; + let (mut writer, mut reader) = connection.open_bi().await.anyerr()?; let _copy_to_stdout = tokio::spawn(async move { tokio::io::copy(&mut reader, &mut tokio::io::stdout()).await }); let _copy_from_stdin = tokio::spawn(async move { tokio::io::copy(&mut tokio::io::stdin(), &mut writer).await }); - _copy_to_stdout.await.e()?.e()?; - _copy_from_stdin.await.e()?.e()?; + _copy_to_stdout.await.anyerr()?.anyerr()?; + _copy_from_stdin.await.anyerr()?.anyerr()?; Ok(()) } #[tokio::main] -async fn main() -> n0_snafu::Result<()> { +async fn main() -> Result<()> { tracing_subscriber::fmt::init(); let args = Args::parse(); if args.endpoint_id.is_some() { diff --git a/iroh/examples/echo-no-router.rs b/iroh/examples/echo-no-router.rs index 770ddce3f32..4427320d36b 100644 --- a/iroh/examples/echo-no-router.rs +++ b/iroh/examples/echo-no-router.rs @@ -8,7 +8,7 @@ //! cargo run --example echo-no-router --features=examples use iroh::{Endpoint, EndpointAddr}; -use n0_snafu::{Error, Result, ResultExt}; +use n0_error::{AnyError as Error, Result, StdResultExt}; /// Each protocol is identified by its ALPN string. /// @@ -39,16 +39,16 @@ async fn connect_side(addr: EndpointAddr) -> Result<()> { let conn = endpoint.connect(addr, ALPN).await?; // Open a bidirectional QUIC stream - let (mut send, mut recv) = conn.open_bi().await.e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; // Send some data to be echoed - send.write_all(b"Hello, world!").await.e()?; + send.write_all(b"Hello, world!").await.anyerr()?; // Signal the end of data for this particular stream - send.finish().e()?; + send.finish().anyerr()?; // Receive the echo, but limit reading up to maximum 1000 bytes - let response = recv.read_to_end(1000).await.e()?; + let response = recv.read_to_end(1000).await.anyerr()?; assert_eq!(&response, b"Hello, world!"); // Explicitly close the whole connection. 
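For the example and test code in this diff, the change is mostly about how plain std errors are lifted into n0-error's catch-all type. A minimal sketch of that usage, assuming the StdResultExt methods exactly as they are called above (`.anyerr()` replacing `.e()`, `.std_context("…")` replacing `.context("…")`); the function and file name here are made up:

use n0_error::{Result, StdResultExt};

// Any error that only implements std::error::Error is converted either bare
// (`.anyerr()`) or with a short message (`.std_context(...)`), and the function
// returns n0_error's Result alias instead of n0_snafu::Result.
fn read_greeting(path: &str) -> Result<String> {
    let raw = std::fs::read(path).std_context("read greeting file")?;
    let text = String::from_utf8(raw).anyerr()?;
    Ok(text)
}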
@@ -86,7 +86,7 @@ async fn start_accept_side() -> Result { while let Some(incoming) = endpoint.accept().await { // spawn a task for each incoming connection, so we can serve multiple connections asynchronously tokio::spawn(async move { - let connection = incoming.await.e()?; + let connection = incoming.await.anyerr()?; // We can get the remote's endpoint id from the connection. let endpoint_id = connection.remote_id()?; @@ -94,16 +94,16 @@ async fn start_accept_side() -> Result { // Our protocol is a simple request-response protocol, so we expect the // connecting peer to open a single bi-directional stream. - let (mut send, mut recv) = connection.accept_bi().await.e()?; + let (mut send, mut recv) = connection.accept_bi().await.anyerr()?; // Echo any bytes received back directly. // This will keep copying until the sender signals the end of data on the stream. - let bytes_sent = tokio::io::copy(&mut recv, &mut send).await.e()?; + let bytes_sent = tokio::io::copy(&mut recv, &mut send).await.anyerr()?; println!("Copied over {bytes_sent} byte(s)"); // By calling `finish` on the send stream we signal that we will not send anything // further, which makes the receive stream on the other end terminate. - send.finish().e()?; + send.finish().anyerr()?; // Wait until the remote closes the connection, which it does once it // received the response. diff --git a/iroh/examples/echo.rs b/iroh/examples/echo.rs index a378b6469a9..2430abb14de 100644 --- a/iroh/examples/echo.rs +++ b/iroh/examples/echo.rs @@ -11,7 +11,7 @@ use iroh::{ endpoint::Connection, protocol::{AcceptError, ProtocolHandler, Router}, }; -use n0_snafu::{Result, ResultExt}; +use n0_error::{Result, StdResultExt}; /// Each protocol is identified by its ALPN string. /// @@ -29,7 +29,7 @@ async fn main() -> Result<()> { connect_side(router.endpoint().addr()).await?; // This makes sure the endpoint in the router is closed properly and connections close gracefully - router.shutdown().await.e()?; + router.shutdown().await.anyerr()?; Ok(()) } @@ -41,16 +41,16 @@ async fn connect_side(addr: EndpointAddr) -> Result<()> { let conn = endpoint.connect(addr, ALPN).await?; // Open a bidirectional QUIC stream - let (mut send, mut recv) = conn.open_bi().await.e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; // Send some data to be echoed - send.write_all(b"Hello, world!").await.e()?; + send.write_all(b"Hello, world!").await.anyerr()?; // Signal the end of data for this particular stream - send.finish().e()?; + send.finish().anyerr()?; // Receive the echo, but limit reading up to maximum 1000 bytes - let response = recv.read_to_end(1000).await.e()?; + let response = recv.read_to_end(1000).await.anyerr()?; assert_eq!(&response, b"Hello, world!"); // Explicitly close the whole connection. diff --git a/iroh/examples/listen-unreliable.rs b/iroh/examples/listen-unreliable.rs index 15c39d54c4a..75c39ec96c7 100644 --- a/iroh/examples/listen-unreliable.rs +++ b/iroh/examples/listen-unreliable.rs @@ -4,7 +4,7 @@ //! run this example from the project root: //! 
$ cargo run --example listen-unreliable use iroh::{Endpoint, RelayMode, SecretKey}; -use n0_snafu::{Error, Result, ResultExt}; +use n0_error::{AnyError as Error, Result, StdResultExt}; use tracing::{info, warn}; // An example ALPN that we are using to communicate over the `Endpoint` @@ -72,7 +72,7 @@ async fn main() -> Result<()> { } }; let alpn = connecting.alpn().await?; - let conn = connecting.await.e()?; + let conn = connecting.await.anyerr()?; let endpoint_id = conn.remote_id()?; info!( "new (unreliable) connection from {endpoint_id} with ALPN {}", @@ -82,11 +82,12 @@ async fn main() -> Result<()> { tokio::spawn(async move { // use the `quinn` API to read a datagram off the connection, and send a datagra, in return while let Ok(message) = conn.read_datagram().await { - let message = String::from_utf8(message.into()).e()?; + let message = String::from_utf8(message.into()).anyerr()?; println!("received: {message}"); let message = format!("hi! you connected to {me}. bye bye"); - conn.send_datagram(message.as_bytes().to_vec().into()).e()?; + conn.send_datagram(message.as_bytes().to_vec().into()) + .anyerr()?; } Ok::<_, Error>(()) diff --git a/iroh/examples/listen.rs b/iroh/examples/listen.rs index 96077508848..f1e9df9e617 100644 --- a/iroh/examples/listen.rs +++ b/iroh/examples/listen.rs @@ -6,14 +6,14 @@ use std::time::Duration; use iroh::{Endpoint, RelayMode, SecretKey, endpoint::ConnectionError}; -use n0_snafu::ResultExt; +use n0_error::{Result, StdResultExt}; use tracing::{debug, info, warn}; // An example ALPN that we are using to communicate over the `Endpoint` const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/magic/0"; #[tokio::main] -async fn main() -> n0_snafu::Result<()> { +async fn main() -> Result<()> { tracing_subscriber::fmt::init(); println!("\nlisten example!\n"); let secret_key = SecretKey::generate(&mut rand::rng()); @@ -70,7 +70,7 @@ async fn main() -> n0_snafu::Result<()> { } }; let alpn = connecting.alpn().await?; - let conn = connecting.await.e()?; + let conn = connecting.await.anyerr()?; let endpoint_id = conn.remote_id()?; info!( "new connection from {endpoint_id} with ALPN {}", @@ -81,16 +81,16 @@ async fn main() -> n0_snafu::Result<()> { tokio::spawn(async move { // accept a bi-directional QUIC connection // use the `quinn` APIs to send and recv content - let (mut send, mut recv) = conn.accept_bi().await.e()?; + let (mut send, mut recv) = conn.accept_bi().await.anyerr()?; debug!("accepted bi stream, waiting for data..."); - let message = recv.read_to_end(100).await.e()?; - let message = String::from_utf8(message).e()?; + let message = recv.read_to_end(100).await.anyerr()?; + let message = String::from_utf8(message).anyerr()?; println!("received: {message}"); let message = format!("hi! you connected to {me}. bye bye"); - send.write_all(message.as_bytes()).await.e()?; + send.write_all(message.as_bytes()).await.anyerr()?; // call `finish` to close the connection gracefully - send.finish().e()?; + send.finish().anyerr()?; // We sent the last message, so wait for the client to close the connection once // it received this message. 
@@ -104,7 +104,7 @@ async fn main() -> n0_snafu::Result<()> { if res.is_err() { println!("endpoint {endpoint_id} did not disconnect within 3 seconds"); } - Ok::<_, n0_snafu::Error>(()) + n0_error::Ok(()) }); } // stop with SIGINT (ctrl-c) diff --git a/iroh/examples/locally-discovered-nodes.rs b/iroh/examples/locally-discovered-nodes.rs index 08e6226d94b..eb7cd9783b2 100644 --- a/iroh/examples/locally-discovered-nodes.rs +++ b/iroh/examples/locally-discovered-nodes.rs @@ -10,8 +10,8 @@ use iroh::{ discovery::mdns::{DiscoveryEvent, MdnsDiscovery}, endpoint_info::UserData, }; +use n0_error::Result; use n0_future::StreamExt; -use n0_snafu::Result; use tokio::task::JoinSet; #[tokio::main] @@ -73,7 +73,7 @@ async fn main() -> Result<()> { ep.set_user_data_for_discovery(Some(ud)); tokio::time::sleep(Duration::from_secs(3)).await; ep.close().await; - Ok::<_, n0_snafu::Error>(()) + n0_error::Ok(()) }); } diff --git a/iroh/examples/screening-connection.rs b/iroh/examples/screening-connection.rs index d556649b16c..a821467db02 100644 --- a/iroh/examples/screening-connection.rs +++ b/iroh/examples/screening-connection.rs @@ -17,7 +17,7 @@ use iroh::{ endpoint::{Connecting, Connection}, protocol::{AcceptError, ProtocolHandler, Router}, }; -use n0_snafu::{Result, ResultExt}; +use n0_error::{Result, StdResultExt, e}; /// Each protocol is identified by its ALPN string. /// @@ -40,7 +40,7 @@ async fn main() -> Result<()> { connect_side(&endpoint_addr).await?; // This makes sure the endpoint in the router is closed properly and connections close gracefully - router.shutdown().await.e()?; + router.shutdown().await.anyerr()?; Ok(()) } @@ -52,16 +52,16 @@ async fn connect_side(addr: &EndpointAddr) -> Result<()> { let conn = endpoint.connect(addr.clone(), ALPN).await?; // Open a bidirectional QUIC stream - let (mut send, mut recv) = conn.open_bi().await.e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; // Send some data to be echoed - send.write_all(b"Hello, world!").await.e()?; + send.write_all(b"Hello, world!").await.anyerr()?; // Signal the end of data for this particular stream - send.finish().e()?; + send.finish().anyerr()?; // Receive the echo, but limit reading up to maximum 1000 bytes - let response = recv.read_to_end(1000).await.e()?; + let response = recv.read_to_end(1000).await.anyerr()?; assert_eq!(&response, b"Hello, world!"); // Explicitly close the whole connection. @@ -110,7 +110,7 @@ impl ProtocolHandler for ScreenedEcho { // reject every other connection if count % 2 == 0 { println!("rejecting connection"); - return Err(AcceptError::NotAllowed {}); + return Err(e!(AcceptError::NotAllowed)); } // To allow normal connection construction, await the connecting future & return diff --git a/iroh/examples/search.rs b/iroh/examples/search.rs index e7c2df5aa2a..8c9a6ea9431 100644 --- a/iroh/examples/search.rs +++ b/iroh/examples/search.rs @@ -37,7 +37,7 @@ use iroh::{ endpoint::Connection, protocol::{AcceptError, ProtocolHandler, Router}, }; -use n0_snafu::{Result, ResultExt}; +use n0_error::{Result, StdResultExt}; use tokio::sync::Mutex; use tracing_subscriber::{EnvFilter, prelude::*}; @@ -97,7 +97,7 @@ async fn main() -> Result<()> { } // Wait for Ctrl-C to be pressed. - tokio::signal::ctrl_c().await.e()?; + tokio::signal::ctrl_c().await.anyerr()?; } Command::Query { endpoint_id, query } => { // Query the remote endpoint. 
@@ -110,7 +110,7 @@ async fn main() -> Result<()> { } } - router.shutdown().await.e()?; + router.shutdown().await.anyerr()?; Ok(()) } @@ -178,21 +178,21 @@ impl BlobSearch { let conn = self.endpoint.connect(endpoint_id, ALPN).await?; // Open a bi-directional in our connection. - let (mut send, mut recv) = conn.open_bi().await.e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; // Send our query. - send.write_all(query.as_bytes()).await.e()?; + send.write_all(query.as_bytes()).await.anyerr()?; // Finish the send stream, signalling that no further data will be sent. // This makes the `read_to_end` call on the accepting side terminate. - send.finish().e()?; + send.finish().anyerr()?; // The response is a 64 bit integer // We simply read it into a byte buffer. let mut num_matches = [0u8; 8]; // Read 8 bytes from the stream. - recv.read_exact(&mut num_matches).await.e()?; + recv.read_exact(&mut num_matches).await.anyerr()?; let num_matches = u64::from_le_bytes(num_matches); diff --git a/iroh/examples/transfer.rs b/iroh/examples/transfer.rs index 54097fd90e9..ea0c42f9554 100644 --- a/iroh/examples/transfer.rs +++ b/iroh/examples/transfer.rs @@ -17,8 +17,8 @@ use iroh::{ dns::{DnsResolver, N0_DNS_ENDPOINT_ORIGIN_PROD, N0_DNS_ENDPOINT_ORIGIN_STAGING}, endpoint::ConnectionError, }; +use n0_error::{Result, StackResultExt, StdResultExt}; use n0_future::task::AbortOnDropHandle; -use n0_snafu::{Result, ResultExt}; use n0_watcher::Watcher as _; use tokio_stream::StreamExt; use tracing::{info, warn}; @@ -212,7 +212,7 @@ impl EndpointArgs { } #[cfg(not(feature = "test-utils"))] { - snafu::whatever!( + n0_error::bail_any!( "Must have the `test-utils` feature enabled when using the `--env=dev` flag" ) } @@ -248,7 +248,7 @@ impl EndpointArgs { } #[cfg(not(feature = "test-utils"))] { - snafu::whatever!( + n0_error::bail_any!( "Must have the `discovery-local-network` enabled when using the `--mdns` flag" ); } @@ -257,9 +257,9 @@ impl EndpointArgs { if let Some(host) = self.dns_server { let addr = tokio::net::lookup_host(host) .await - .context("Failed to resolve DNS server address")? + .std_context("Failed to resolve DNS server address")? 
.next() - .context("Failed to resolve DNS server address")?; + .std_context("Failed to resolve DNS server address")?; builder = builder.dns_resolver(DnsResolver::with_nameserver(addr)); } else if self.env == Env::Dev { let addr = DEV_DNS_SERVER.parse().expect("valid addr"); @@ -279,7 +279,7 @@ impl EndpointArgs { } #[cfg(not(feature = "discovery-local-network"))] { - snafu::whatever!( + n0_error::bail_any!( "Must have the `test-utils` feature enabled when using the `--relay-only` flag" ); } @@ -339,7 +339,7 @@ async fn provide(endpoint: Endpoint, size: u64) -> Result<()> { // spawn a task to handle reading and writing off of the connection let endpoint_clone = endpoint.clone(); tokio::spawn(async move { - let conn = connecting.await.e()?; + let conn = connecting.await.anyerr()?; let endpoint_id = conn.remote_id()?; info!( "new connection from {endpoint_id} with ALPN {}", @@ -354,10 +354,10 @@ async fn provide(endpoint: Endpoint, size: u64) -> Result<()> { // accept a bi-directional QUIC connection // use the `quinn` APIs to send and recv content - let (mut send, mut recv) = conn.accept_bi().await.e()?; + let (mut send, mut recv) = conn.accept_bi().await.anyerr()?; tracing::debug!("accepted bi stream, waiting for data..."); - let message = recv.read_to_end(100).await.e()?; - let message = String::from_utf8(message).e()?; + let message = recv.read_to_end(100).await.anyerr()?; + let message = String::from_utf8(message).anyerr()?; println!("[{remote}] Received: \"{message}\""); let start = Instant::now(); @@ -386,7 +386,7 @@ async fn provide(endpoint: Endpoint, size: u64) -> Result<()> { } else { println!("[{remote}] Disconnected"); } - Ok::<_, n0_snafu::Error>(()) + n0_error::Ok(()) }); } @@ -407,12 +407,12 @@ async fn fetch(endpoint: Endpoint, remote_addr: EndpointAddr) -> Result<()> { let _guard = watch_conn_type(&endpoint, remote_id); // Use the Quinn API to send and recv content. - let (mut send, mut recv) = conn.open_bi().await.e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; let message = format!("{me} is saying hello!"); - send.write_all(message.as_bytes()).await.e()?; + send.write_all(message.as_bytes()).await.anyerr()?; // Call `finish` to signal no more data will be sent on this stream. - send.finish().e()?; + send.finish().anyerr()?; println!("Sent: \"{message}\""); let (len, time_to_first_byte, chnk) = drain_stream(&mut recv, false).await?; @@ -421,7 +421,7 @@ async fn fetch(endpoint: Endpoint, remote_addr: EndpointAddr) -> Result<()> { // message to be sent. tokio::time::timeout(Duration::from_secs(3), endpoint.close()) .await - .e()?; + .anyerr()?; let duration = start.elapsed(); println!( @@ -448,7 +448,7 @@ async fn drain_stream( let mut num_chunks: u64 = 0; if read_unordered { - while let Some(chunk) = stream.read_chunk(usize::MAX, false).await.e()? { + while let Some(chunk) = stream.read_chunk(usize::MAX, false).await.anyerr()? { if first_byte { time_to_first_byte = download_start.elapsed(); first_byte = false; @@ -470,7 +470,7 @@ async fn drain_stream( Bytes::new(), Bytes::new(), Bytes::new(), Bytes::new(), ]; - while let Some(n) = stream.read_chunks(&mut bufs[..]).await.e()? { + while let Some(n) = stream.read_chunks(&mut bufs[..]).await.anyerr()? 
{ if first_byte { time_to_first_byte = download_start.elapsed(); first_byte = false; @@ -497,21 +497,21 @@ async fn send_data_on_stream( stream .write_chunk(bytes_data.clone()) .await - .context("failed sending data")?; + .std_context("failed sending data")?; } if remaining != 0 { stream .write_chunk(bytes_data.slice(0..remaining)) .await - .context("failed sending data")?; + .std_context("failed sending data")?; } - stream.finish().context("failed finishing stream")?; + stream.finish().std_context("failed finishing stream")?; stream .stopped() .await - .context("failed to wait for stream to be stopped")?; + .std_context("failed to wait for stream to be stopped")?; Ok(()) } diff --git a/iroh/src/disco.rs b/iroh/src/disco.rs index 75e10aa7664..0e648b28db5 100644 --- a/iroh/src/disco.rs +++ b/iroh/src/disco.rs @@ -25,10 +25,9 @@ use std::{ use data_encoding::HEXLOWER; use iroh_base::{PublicKey, RelayUrl}; -use nested_enum_utils::common_fields; +use n0_error::{e, ensure, stack_error}; use rand::Rng; use serde::{Deserialize, Serialize}; -use snafu::{Snafu, ensure}; use url::Url; use crate::magicsock::transports; @@ -217,11 +216,11 @@ pub struct CallMeMaybe { impl Ping { fn from_bytes(p: &[u8]) -> Result { // Deliberately lax on longer-than-expected messages, for future compatibility. - ensure!(p.len() >= PING_LEN, TooShortSnafu); + ensure!(p.len() >= PING_LEN, ParseError::TooShort); let tx_id: [u8; TX_LEN] = p[..TX_LEN].try_into().expect("length checked"); let raw_key = &p[TX_LEN..TX_LEN + iroh_base::PublicKey::LENGTH]; let endpoint_key = - PublicKey::try_from(raw_key).map_err(|_| InvalidEncodingSnafu.build())?; + PublicKey::try_from(raw_key).map_err(|_| e!(ParseError::InvalidEncoding))?; let tx_id = TransactionId::from(tx_id); Ok(Ping { @@ -243,36 +242,31 @@ impl Ping { } #[allow(missing_docs)] -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] #[non_exhaustive] pub enum ParseError { - #[snafu(display("message is too short"))] - TooShort {}, - #[snafu(display("invalid encoding"))] - InvalidEncoding {}, - #[snafu(display("unknown format"))] - UnknownFormat {}, + #[error("message is too short")] + TooShort, + #[error("invalid encoding")] + InvalidEncoding, + #[error("unknown format")] + UnknownFormat, } fn send_addr_from_bytes(p: &[u8]) -> Result { - ensure!(p.len() > 2, TooShortSnafu); + ensure!(p.len() > 2, ParseError::TooShort); match p[0] { 0u8 => { - let bytes: [u8; EP_LENGTH] = p[1..].try_into().map_err(|_| TooShortSnafu.build())?; + let bytes: [u8; EP_LENGTH] = p[1..].try_into().map_err(|_| e!(ParseError::TooShort))?; let addr = socket_addr_from_bytes(bytes); Ok(SendAddr::Udp(addr)) } 1u8 => { - let s = std::str::from_utf8(&p[1..]).map_err(|_| InvalidEncodingSnafu.build())?; - let u: Url = s.parse().map_err(|_| InvalidEncodingSnafu.build())?; + let s = std::str::from_utf8(&p[1..]).map_err(|_| e!(ParseError::InvalidEncoding))?; + let u: Url = s.parse().map_err(|_| e!(ParseError::InvalidEncoding))?; Ok(SendAddr::Relay(u.into())) } - _ => Err(UnknownFormatSnafu.build()), + _ => Err(e!(ParseError::UnknownFormat)), } } @@ -318,7 +312,9 @@ fn socket_addr_as_bytes(addr: &SocketAddr) -> [u8; EP_LENGTH] { impl Pong { fn from_bytes(p: &[u8]) -> Result { - let tx_id: [u8; TX_LEN] = p[..TX_LEN].try_into().map_err(|_| TooShortSnafu.build())?; + let tx_id: [u8; TX_LEN] = p[..TX_LEN] + .try_into() + .map_err(|_| e!(ParseError::TooShort))?; let tx_id = TransactionId::from(tx_id); let src = 
send_addr_from_bytes(&p[TX_LEN..])?; @@ -342,7 +338,7 @@ impl Pong { impl CallMeMaybe { fn from_bytes(p: &[u8]) -> Result { - ensure!(p.len() % EP_LENGTH == 0, InvalidEncodingSnafu); + ensure!(p.len() % EP_LENGTH == 0, ParseError::InvalidEncoding); let num_entries = p.len() / EP_LENGTH; let mut m = CallMeMaybe { @@ -350,8 +346,9 @@ impl CallMeMaybe { }; for chunk in p.chunks_exact(EP_LENGTH) { - let bytes: [u8; EP_LENGTH] = - chunk.try_into().map_err(|_| InvalidEncodingSnafu.build())?; + let bytes: [u8; EP_LENGTH] = chunk + .try_into() + .map_err(|_| e!(ParseError::InvalidEncoding))?; let src = socket_addr_from_bytes(bytes); m.my_numbers.push(src); } @@ -380,11 +377,11 @@ impl CallMeMaybe { impl Message { /// Parses the encrypted part of the message from inside the nacl secretbox. pub fn from_bytes(p: &[u8]) -> Result { - ensure!(p.len() >= 2, TooShortSnafu); + ensure!(p.len() >= 2, ParseError::TooShort); - let t = MessageType::try_from(p[0]).map_err(|_| UnknownFormatSnafu.build())?; + let t = MessageType::try_from(p[0]).map_err(|_| e!(ParseError::UnknownFormat))?; let version = p[1]; - ensure!(version == V0, UnknownFormatSnafu); + ensure!(version == V0, ParseError::UnknownFormat); let p = &p[2..]; match t { diff --git a/iroh/src/discovery.rs b/iroh/src/discovery.rs index d2a27c7caac..9c19d958170 100644 --- a/iroh/src/discovery.rs +++ b/iroh/src/discovery.rs @@ -65,7 +65,7 @@ //! endpoint::RelayMode, //! }; //! -//! # async fn wrapper() -> n0_snafu::Result<()> { +//! # async fn wrapper() -> n0_error::Result<()> { //! let ep = Endpoint::empty_builder(RelayMode::Default) //! .discovery(PkarrPublisher::n0_dns()) //! .discovery(DnsDiscovery::n0_dns()) @@ -86,7 +86,7 @@ //! # Endpoint, SecretKey, //! # }; //! # -//! # async fn wrapper() -> n0_snafu::Result<()> { +//! # async fn wrapper() -> n0_error::Result<()> { //! let ep = Endpoint::empty_builder(RelayMode::Default) //! .discovery(PkarrPublisher::n0_dns()) //! .discovery(DnsDiscovery::n0_dns()) @@ -113,14 +113,13 @@ use std::sync::{Arc, RwLock}; use iroh_base::{EndpointAddr, EndpointId}; +use n0_error::{AnyError, e, ensure, stack_error}; use n0_future::{ boxed::BoxStream, stream::StreamExt, task::{self, AbortOnDropHandle}, time::{self, Duration}, }; -use nested_enum_utils::common_fields; -use snafu::{IntoError, Snafu, ensure}; use tokio::sync::oneshot; use tracing::{Instrument, debug, error_span, warn}; @@ -181,20 +180,14 @@ impl DynIntoDiscovery for T { } /// IntoDiscovery errors -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] #[non_exhaustive] -#[snafu(module)] pub enum IntoDiscoveryError { - #[snafu(display("Service '{provenance}' error"))] + #[error("Service '{provenance}' error")] User { provenance: &'static str, - source: Box, + source: AnyError, }, } @@ -204,7 +197,10 @@ impl IntoDiscoveryError { provenance: &'static str, source: T, ) -> Self { - into_discovery_error::UserSnafu { provenance }.into_error(Box::new(source)) + e!(IntoDiscoveryError::User { + provenance, + source: AnyError::from_std(source) + }) } /// Creates a new user error from an arbitrary boxed error type. 
@@ -212,46 +208,61 @@ impl IntoDiscoveryError { provenance: &'static str, source: Box<dyn std::error::Error + Send + Sync + 'static>, ) -> Self { - into_discovery_error::UserSnafu { provenance }.into_error(source) + e!(IntoDiscoveryError::User { + provenance, + source: AnyError::from_std_box(source) + }) } } /// Discovery errors -#[common_fields({ - backtrace: Option<snafu::Backtrace>, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] #[non_exhaustive] pub enum DiscoveryError { - #[snafu(display("No discovery service configured"))] - NoServiceConfigured {}, - #[snafu(display("Discovery produced no results for {}", endpoint_id.fmt_short()))] + #[error("No discovery service configured")] + NoServiceConfigured, + #[error("Discovery produced no results for {}", endpoint_id.fmt_short())] NoResults { endpoint_id: EndpointId }, - #[snafu(display("Service '{provenance}' error"))] + #[error("Service '{provenance}' error")] User { provenance: &'static str, - source: Box<dyn std::error::Error + Send + Sync + 'static>, + source: AnyError, }, } impl DiscoveryError { /// Creates a new user error from an arbitrary error type. + #[track_caller] pub fn from_err<T: std::error::Error + Send + Sync + 'static>( provenance: &'static str, source: T, ) -> Self { - UserSnafu { provenance }.into_error(Box::new(source)) + e!(DiscoveryError::User { + provenance, + source: AnyError::from_std(source) + }) } /// Creates a new user error from an arbitrary boxed error type. + #[track_caller] pub fn from_err_box( provenance: &'static str, source: Box<dyn std::error::Error + Send + Sync + 'static>, ) -> Self { - UserSnafu { provenance }.into_error(source) + e!(DiscoveryError::User { + provenance, + source: AnyError::from_std_box(source) + }) + } + + /// Creates a new user error from an arbitrary error type that can be converted into [`AnyError`]. + #[track_caller] + pub fn from_err_any(provenance: &'static str, source: impl Into<AnyError>) -> Self { + e!(DiscoveryError::User { + provenance, + source: source.into() + }) + } } @@ -505,7 +516,10 @@ pub(super) struct DiscoveryTask { impl DiscoveryTask { /// Starts a discovery task.
pub(super) fn start(ep: Endpoint, endpoint_id: EndpointId) -> Result { - ensure!(!ep.discovery().is_empty(), NoServiceConfiguredSnafu); + ensure!( + !ep.discovery().is_empty(), + DiscoveryError::NoServiceConfigured + ); let (on_first_tx, on_first_rx) = oneshot::channel(); let me = ep.id(); let task = task::spawn( @@ -536,7 +550,10 @@ impl DiscoveryTask { if !ep.needs_discovery(endpoint_id, MAX_AGE) { return Ok(None); } - ensure!(!ep.discovery().is_empty(), NoServiceConfiguredSnafu); + ensure!( + !ep.discovery().is_empty(), + DiscoveryError::NoServiceConfigured + ); let (on_first_tx, on_first_rx) = oneshot::channel(); let ep = ep.clone(); let me = ep.id(); @@ -574,11 +591,14 @@ impl DiscoveryTask { ep: &Endpoint, endpoint_id: EndpointId, ) -> Result>, DiscoveryError> { - ensure!(!ep.discovery().is_empty(), NoServiceConfiguredSnafu); + ensure!( + !ep.discovery().is_empty(), + DiscoveryError::NoServiceConfigured + ); let stream = ep .discovery() .resolve(endpoint_id) - .ok_or(NoResultsSnafu { endpoint_id }.build())?; + .ok_or_else(|| e!(DiscoveryError::NoResults { endpoint_id }))?; Ok(stream) } @@ -623,7 +643,8 @@ impl DiscoveryTask { } } if let Some(tx) = on_first_tx.take() { - tx.send(Err(NoResultsSnafu { endpoint_id }.build())).ok(); + tx.send(Err(e!(DiscoveryError::NoResults { endpoint_id }))) + .ok(); } } } @@ -638,7 +659,7 @@ mod tests { }; use iroh_base::{EndpointAddr, SecretKey, TransportAddr}; - use n0_snafu::{Error, Result, ResultExt}; + use n0_error::{AnyError as Error, Result, StdResultExt}; use quinn::{IdleTimeout, TransportConfig}; use rand::{CryptoRng, Rng, SeedableRng}; use tokio_util::task::AbortOnDropHandle; @@ -814,7 +835,7 @@ mod tests { let _conn = ep2 .connect(ep1_addr, TEST_ALPN) .await - .context("connecting")?; + .std_context("connecting")?; Ok(()) } @@ -931,7 +952,7 @@ mod tests { // we skip accept() errors, they can be caused by retransmits while let Some(connecting) = ep.accept().await.and_then(|inc| inc.accept().ok()) { // Just accept incoming connections, but don't do anything with them. - let conn = connecting.await.context("connecting")?; + let conn = connecting.await.std_context("connecting")?; connections.push(conn); } @@ -958,8 +979,8 @@ mod tests { mod test_dns_pkarr { use iroh_base::{EndpointAddr, SecretKey, TransportAddr}; use iroh_relay::{RelayMap, endpoint_info::UserData}; + use n0_error::{AnyError as Error, Result, StackResultExt, StdResultExt}; use n0_future::time::Duration; - use n0_snafu::{Error, Result, ResultExt}; use rand::{CryptoRng, SeedableRng}; use tokio_util::task::AbortOnDropHandle; use tracing_test::traced_test; @@ -1092,7 +1113,7 @@ mod test_dns_pkarr { async move { // we skip accept() errors, they can be caused by retransmits while let Some(connecting) = ep.accept().await.and_then(|inc| inc.accept().ok()) { - let _conn = connecting.await.context("connecting")?; + let _conn = connecting.await.std_context("connecting")?; // Just accept incoming connections, but don't do anything with them. 
} diff --git a/iroh/src/discovery/dns.rs b/iroh/src/discovery/dns.rs index bcfe90cef74..52437dbd0ef 100644 --- a/iroh/src/discovery/dns.rs +++ b/iroh/src/discovery/dns.rs @@ -113,7 +113,7 @@ impl Discovery for DnsDiscovery { let endpoint_info = resolver .lookup_endpoint_by_id_staggered(&endpoint_id, &origin_domain, DNS_STAGGERING_MS) .await - .map_err(|e| DiscoveryError::from_err("dns", e))?; + .map_err(|e| DiscoveryError::from_err_any("dns", e))?; Ok(DiscoveryItem::new(endpoint_info, "dns", None)) }; let stream = n0_future::stream::once_future(fut); diff --git a/iroh/src/discovery/mdns.rs b/iroh/src/discovery/mdns.rs index 983a0dfa04d..e5ea36575ca 100644 --- a/iroh/src/discovery/mdns.rs +++ b/iroh/src/discovery/mdns.rs @@ -546,10 +546,9 @@ mod tests { /// tests) mod run_in_isolation { use iroh_base::{SecretKey, TransportAddr}; + use n0_error::{AnyError as Error, Result, StdResultExt, bail_any}; use n0_future::StreamExt; - use n0_snafu::{Error, Result, ResultExt}; use rand::{CryptoRng, SeedableRng}; - use snafu::whatever; use tracing_test::traced_test; use super::super::*; @@ -593,7 +592,7 @@ mod tests { .. } = tokio::time::timeout(Duration::from_secs(5), s1.next()) .await - .context("timeout")? + .std_context("timeout")? .unwrap() else { panic!("Received unexpected discovery event"); @@ -603,7 +602,7 @@ mod tests { .. } = tokio::time::timeout(Duration::from_secs(5), s2.next()) .await - .context("timeout")? + .std_context("timeout")? .unwrap() else { panic!("Received unexpected discovery event"); @@ -634,7 +633,7 @@ mod tests { loop { let event = tokio::time::timeout(Duration::from_secs(5), s1.next()) .await - .context("timeout")? + .std_context("timeout")? .expect("Stream should not be closed"); match event { @@ -655,7 +654,7 @@ mod tests { loop { let event = tokio::time::timeout(Duration::from_secs(10), s1.next()) .await - .context("timeout waiting for expiration event")? + .std_context("timeout waiting for expiration event")? .expect("Stream should not be closed"); match event { @@ -708,7 +707,7 @@ mod tests { got_ids.insert((endpoint_info.endpoint_id, data)); } } else { - whatever!( + bail_any!( "no more events, only got {} ids, expected {num_endpoints}\n", got_ids.len() ); @@ -719,7 +718,7 @@ mod tests { }; tokio::time::timeout(Duration::from_secs(5), test) .await - .context("timeout")? + .std_context("timeout")? 
} #[tokio::test] diff --git a/iroh/src/discovery/pkarr.rs b/iroh/src/discovery/pkarr.rs index e520049bc8f..67cecd0e4c3 100644 --- a/iroh/src/discovery/pkarr.rs +++ b/iroh/src/discovery/pkarr.rs @@ -48,6 +48,7 @@ use std::sync::Arc; use iroh_base::{EndpointId, RelayUrl, SecretKey}; use iroh_relay::endpoint_info::{EncodingError, EndpointInfo}; +use n0_error::{e, stack_error}; use n0_future::{ boxed::BoxStream, task::{self, AbortOnDropHandle}, @@ -58,7 +59,6 @@ use pkarr::{ SignedPacket, errors::{PublicKeyError, SignedPacketVerifyError}, }; -use snafu::{ResultExt, Snafu}; use tracing::{Instrument, debug, error_span, warn}; use url::Url; @@ -76,28 +76,40 @@ use crate::{ pub mod dht; #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] #[non_exhaustive] pub enum PkarrError { - #[snafu(display("Invalid public key"))] - PublicKey { source: PublicKeyError }, - #[snafu(display("Packet failed to verify"))] - Verify { source: SignedPacketVerifyError }, - #[snafu(display("Invalid relay URL"))] + #[error("Invalid public key")] + PublicKey { + #[error(std_err)] + source: PublicKeyError, + }, + #[error("Packet failed to verify")] + Verify { + #[error(std_err)] + source: SignedPacketVerifyError, + }, + #[error("Invalid relay URL")] InvalidRelayUrl { url: RelayUrl }, - #[snafu(display("Error sending http request"))] - HttpSend { source: reqwest::Error }, - #[snafu(display("Error resolving http request"))] + #[error("Error sending http request")] + HttpSend { + #[error(std_err)] + source: reqwest::Error, + }, + #[error("Error resolving http request")] HttpRequest { status: reqwest::StatusCode }, - #[snafu(display("Http payload error"))] - HttpPayload { source: reqwest::Error }, - #[snafu(display("EncodingError"))] + #[error("Http payload error")] + HttpPayload { + #[error(std_err)] + source: reqwest::Error, + }, + #[error("EncodingError")] Encoding { source: EncodingError }, } impl From for DiscoveryError { fn from(err: PkarrError) -> Self { - DiscoveryError::from_err("pkarr", err) + DiscoveryError::from_err_any("pkarr", err) } } @@ -394,7 +406,7 @@ impl PublisherService { ); let signed_packet = info .to_pkarr_signed_packet(&self.secret_key, self.ttl) - .context(EncodingSnafu)?; + .map_err(|err| e!(PkarrError::Encoding, err))?; self.pkarr_client.publish(&signed_packet).await?; Ok(()) } @@ -504,7 +516,7 @@ impl Discovery for PkarrResolver { let fut = async move { let signed_packet = pkarr_client.resolve(endpoint_id).await?; let info = EndpointInfo::from_pkarr_signed_packet(&signed_packet) - .map_err(|err| DiscoveryError::from_err("pkarr", err))?; + .map_err(|err| DiscoveryError::from_err_any("pkarr", err))?; let item = DiscoveryItem::new(info, "pkarr", None); Ok(item) }; @@ -549,16 +561,15 @@ impl PkarrRelayClient { /// Resolves a [`SignedPacket`] for the given [`EndpointId`]. pub async fn resolve(&self, endpoint_id: EndpointId) -> Result { // We map the error to string, as in browsers the error is !Send - let public_key = - pkarr::PublicKey::try_from(endpoint_id.as_bytes()).context(PublicKeySnafu)?; + let public_key = pkarr::PublicKey::try_from(endpoint_id.as_bytes()) + .map_err(|err| e!(PkarrError::PublicKey, err))?; let mut url = self.pkarr_relay_url.clone(); url.path_segments_mut() .map_err(|_| { - InvalidRelayUrlSnafu { - url: self.pkarr_relay_url.clone(), - } - .build() + e!(PkarrError::InvalidRelayUrl { + url: self.pkarr_relay_url.clone().into() + }) })? 
.push(&public_key.to_z32()); @@ -567,20 +578,22 @@ impl PkarrRelayClient { .get(url) .send() .await - .context(HttpSendSnafu)?; + .map_err(|err| e!(PkarrError::HttpSend, err))?; if !response.status().is_success() { - return Err(HttpRequestSnafu { - status: response.status(), - } - .build() + return Err(e!(PkarrError::HttpRequest { + status: response.status() + }) .into()); } - let payload = response.bytes().await.context(HttpPayloadSnafu)?; + let payload = response + .bytes() + .await + .map_err(|source| e!(PkarrError::HttpPayload { source }))?; // We map the error to string, as in browsers the error is !Send - let packet = - SignedPacket::from_relay_payload(&public_key, &payload).context(VerifySnafu)?; + let packet = SignedPacket::from_relay_payload(&public_key, &payload) + .map_err(|err| e!(PkarrError::Verify, err))?; Ok(packet) } @@ -589,10 +602,9 @@ impl PkarrRelayClient { let mut url = self.pkarr_relay_url.clone(); url.path_segments_mut() .map_err(|_| { - InvalidRelayUrlSnafu { - url: self.pkarr_relay_url.clone(), - } - .build() + e!(PkarrError::InvalidRelayUrl { + url: self.pkarr_relay_url.clone().into() + }) })? .push(&signed_packet.public_key().to_z32()); @@ -602,13 +614,12 @@ impl PkarrRelayClient { .body(signed_packet.to_relay_payload()) .send() .await - .context(HttpSendSnafu)?; + .map_err(|source| e!(PkarrError::HttpSend { source }))?; if !response.status().is_success() { - return Err(HttpRequestSnafu { - status: response.status(), - } - .build()); + return Err(e!(PkarrError::HttpRequest { + status: response.status() + })); } Ok(()) diff --git a/iroh/src/discovery/pkarr/dht.rs b/iroh/src/discovery/pkarr/dht.rs index e9d6efc9985..720e66e5bf5 100644 --- a/iroh/src/discovery/pkarr/dht.rs +++ b/iroh/src/discovery/pkarr/dht.rs @@ -333,7 +333,7 @@ mod tests { use std::collections::BTreeSet; use iroh_base::RelayUrl; - use n0_snafu::{Result, ResultExt}; + use n0_error::{Result, StdResultExt}; use tracing_test::traced_test; use super::*; @@ -343,17 +343,17 @@ mod tests { #[traced_test] async fn dht_discovery_smoke() -> Result { let secret = SecretKey::generate(&mut rand::rng()); - let testnet = pkarr::mainline::Testnet::new_async(3).await.e()?; + let testnet = pkarr::mainline::Testnet::new_async(3).await.anyerr()?; let client = pkarr::Client::builder() .dht(|builder| builder.bootstrap(&testnet.bootstrap)) .build() - .e()?; + .anyerr()?; let discovery = DhtDiscovery::builder() .secret_key(secret.clone()) .client(client) .build()?; - let relay_url: RelayUrl = Url::parse("https://example.com").e()?.into(); + let relay_url: RelayUrl = Url::parse("https://example.com").anyerr()?.into(); let data = EndpointData::default().with_relay_url(Some(relay_url.clone())); discovery.publish(&data); diff --git a/iroh/src/discovery/static_provider.rs b/iroh/src/discovery/static_provider.rs index a1764026039..05e16b85d69 100644 --- a/iroh/src/discovery/static_provider.rs +++ b/iroh/src/discovery/static_provider.rs @@ -41,7 +41,7 @@ use super::{Discovery, DiscoveryError, DiscoveryItem, EndpointData, EndpointInfo /// use iroh_base::SecretKey; /// /// # #[tokio::main] -/// # async fn main() -> n0_snafu::Result<()> { +/// # async fn main() -> n0_error::Result<()> { /// // Create the discovery service and endpoint. 
/// let discovery = StaticProvider::new(); /// @@ -127,7 +127,7 @@ impl StaticProvider { /// # Vec::new() /// # } /// # #[tokio::main] - /// # async fn main() -> n0_snafu::Result<()> { + /// # async fn main() -> n0_error::Result<()> { /// // get addrs from somewhere /// let addrs = get_addrs(); /// @@ -233,7 +233,7 @@ impl Discovery for StaticProvider { #[cfg(test)] mod tests { use iroh_base::{EndpointAddr, SecretKey, TransportAddr}; - use n0_snafu::{Result, ResultExt}; + use n0_error::{Result, StackResultExt}; use super::*; use crate::{Endpoint, RelayMode}; diff --git a/iroh/src/endpoint.rs b/iroh/src/endpoint.rs index 0b2e6cc2211..8950fba0769 100644 --- a/iroh/src/endpoint.rs +++ b/iroh/src/endpoint.rs @@ -23,11 +23,10 @@ use std::{ use ed25519_dalek::{VerifyingKey, pkcs8::DecodePublicKey}; use iroh_base::{EndpointAddr, EndpointId, RelayUrl, SecretKey, TransportAddr}; use iroh_relay::{RelayConfig, RelayMap}; +use n0_error::{e, ensure, stack_error}; use n0_future::time::Duration; use n0_watcher::Watcher; -use nested_enum_utils::common_fields; use pin_project::pin_project; -use snafu::{ResultExt, Snafu, ensure}; use tracing::{debug, instrument, trace, warn}; use url::Url; @@ -41,7 +40,7 @@ use crate::{ UserData, }, endpoint::presets::Preset, - magicsock::{self, EndpointIdMappedAddr, Handle, OwnAddressSnafu}, + magicsock::{self, EndpointIdMappedAddr, Handle}, metrics::EndpointMetrics, net_report::Report, tls::{self, DEFAULT_MAX_TLS_TICKETS}, @@ -482,80 +481,59 @@ pub struct Endpoint { } #[allow(missing_docs)] -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources)] #[non_exhaustive] pub enum ConnectWithOptsError { - #[snafu(transparent)] + #[error(transparent)] AddEndpointAddr { source: AddEndpointAddrError }, - #[snafu(display("Connecting to ourself is not supported"))] - SelfConnect {}, - #[snafu(display("No addressing information available"))] + #[error("Connecting to ourself is not supported")] + SelfConnect, + #[error("No addressing information available")] NoAddress { source: GetMappingAddressError }, - #[snafu(display("Unable to connect to remote"))] - Quinn { source: quinn::ConnectError }, + #[error("Unable to connect to remote")] + Quinn { + #[error(std_err)] + source: quinn_proto::ConnectError, + }, } #[allow(missing_docs)] -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources)] #[non_exhaustive] pub enum ConnectError { - #[snafu(transparent)] - Connect { - #[snafu(source(from(ConnectWithOptsError, Box::new)))] - source: Box, - }, - #[snafu(transparent)] + #[error(transparent)] + Connect { source: ConnectWithOptsError }, + #[error(transparent)] Connection { - #[snafu(source(from(ConnectionError, Box::new)))] - source: Box, + #[error(std_err)] + source: ConnectionError, }, } #[allow(missing_docs)] -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources)] #[non_exhaustive] pub enum BindError { - #[snafu(transparent)] + #[error(transparent)] MagicSpawn { source: magicsock::CreateHandleError, }, - #[snafu(transparent)] + #[error(transparent)] Discovery { source: crate::discovery::IntoDiscoveryError, }, } #[allow(missing_docs)] -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] 
-#[derive(Debug, Snafu)] -#[snafu(module)] +#[stack_error(derive, add_meta)] #[non_exhaustive] pub enum GetMappingAddressError { - #[snafu(display("Discovery service required due to missing addressing information"))] + #[error("Discovery service required due to missing addressing information")] DiscoveryStart { source: DiscoveryError }, - #[snafu(display("Discovery service failed"))] + #[error("Discovery service failed")] Discover { source: DiscoveryError }, - #[snafu(display("No addressing information found"))] - NoAddress {}, + #[error("No addressing information found")] + NoAddress, } impl Endpoint { @@ -691,7 +669,10 @@ impl Endpoint { ); // Connecting to ourselves is not supported. - ensure!(endpoint_addr.id != self.id(), SelfConnectSnafu); + ensure!( + endpoint_addr.id != self.id(), + ConnectWithOptsError::SelfConnect + ); if !endpoint_addr.is_empty() { self.add_endpoint_addr(endpoint_addr.clone(), Source::App)?; @@ -706,8 +687,7 @@ impl Endpoint { // still running task. let (mapped_addr, _discovery_drop_guard) = self .get_mapping_addr_and_maybe_start_discovery(endpoint_addr) - .await - .context(NoAddressSnafu)?; + .await?; let transport_config = options .transport_config @@ -735,15 +715,11 @@ impl Endpoint { }; let server_name = &tls::name::encode(endpoint_id); - let connect = self - .msock - .endpoint() - .connect_with( - client_config, - mapped_addr.private_socket_addr(), - server_name, - ) - .context(QuinnSnafu)?; + let connect = self.msock.endpoint().connect_with( + client_config, + mapped_addr.private_socket_addr(), + server_name, + )?; Ok(Connecting { inner: connect, @@ -798,7 +774,10 @@ impl Endpoint { source: Source, ) -> Result<(), AddEndpointAddrError> { // Connecting to ourselves is not supported. - snafu::ensure!(endpoint_addr.id != self.id(), OwnAddressSnafu); + ensure!( + endpoint_addr.id != self.id(), + AddEndpointAddrError::OwnAddress + ); self.msock.add_endpoint_addr(endpoint_addr, source) } @@ -838,7 +817,7 @@ impl Endpoint { /// The observed [`EndpointAddr`] will have the current [`RelayUrl`] and direct addresses. 
/// /// ```no_run - /// # async fn wrapper() -> n0_snafu::Result { + /// # async fn wrapper() -> n0_error::Result<()> { /// use iroh::{Endpoint, Watcher}; /// /// let endpoint = Endpoint::builder() @@ -1030,7 +1009,7 @@ impl Endpoint { /// ```rust /// # use std::collections::BTreeMap; /// # use iroh::endpoint::Endpoint; - /// # async fn wrapper() -> n0_snafu::Result { + /// # async fn wrapper() -> n0_error::Result<()> { /// let endpoint = Endpoint::bind().await?; /// assert_eq!(endpoint.metrics().magicsock.recv_datagrams.get(), 0); /// # Ok(()) @@ -1048,7 +1027,7 @@ impl Endpoint { /// # use std::collections::BTreeMap; /// # use iroh_metrics::{Metric, MetricsGroup, MetricValue, MetricsGroupSet}; /// # use iroh::endpoint::Endpoint; - /// # async fn wrapper() -> n0_snafu::Result { + /// # async fn wrapper() -> n0_error::Result<()> { /// let endpoint = Endpoint::bind().await?; /// let metrics: BTreeMap = endpoint /// .metrics() @@ -1071,7 +1050,7 @@ impl Endpoint { /// ```rust /// # use iroh_metrics::{Registry, MetricsSource}; /// # use iroh::endpoint::Endpoint; - /// # async fn wrapper() -> n0_snafu::Result { + /// # async fn wrapper() -> n0_error::Result<()> { /// let endpoint = Endpoint::bind().await?; /// let mut registry = Registry::default(); /// registry.register_all(endpoint.metrics()); @@ -1094,8 +1073,8 @@ impl Endpoint { /// # use std::{sync::{Arc, RwLock}, time::Duration}; /// # use iroh_metrics::{Registry, MetricsSource}; /// # use iroh::endpoint::Endpoint; - /// # use n0_snafu::ResultExt; - /// # async fn wrapper() -> n0_snafu::Result { + /// # use n0_error::{StackResultExt, StdResultExt}; + /// # async fn wrapper() -> n0_error::Result<()> { /// // Create a registry, wrapped in a read-write lock so that we can register and serve /// // the metrics independently. /// let registry = Arc::new(RwLock::new(Registry::default())); @@ -1116,10 +1095,10 @@ impl Endpoint { /// tokio::time::sleep(Duration::from_millis(500)); /// let res = reqwest::get("http://localhost:9100/metrics") /// .await - /// .context("get")? + /// .std_context("get")? /// .text() /// .await - /// .context("text")?; + /// .std_context("text")?; /// /// assert!(res.contains(r#"TYPE magicsock_recv_datagrams counter"#)); /// assert!(res.contains(r#"magicsock_recv_datagrams_total 0"#)); @@ -1290,15 +1269,16 @@ impl Endpoint { // only then continue, because otherwise we wouldn't have any // path to the remote endpoint. 
let res = DiscoveryTask::start(self.clone(), endpoint_id); - let mut discovery = res.context(get_mapping_address_error::DiscoveryStartSnafu)?; + let mut discovery = + res.map_err(|err| e!(GetMappingAddressError::DiscoveryStart, err))?; discovery .first_arrived() .await - .context(get_mapping_address_error::DiscoverSnafu)?; + .map_err(|err| e!(GetMappingAddressError::Discover, err))?; if let Some(addr) = self.msock.get_mapping_addr(endpoint_id) { Ok((addr, Some(discovery))) } else { - Err(get_mapping_address_error::NoAddressSnafu.build()) + Err(e!(GetMappingAddressError::NoAddress)) } } } @@ -1526,20 +1506,18 @@ pub struct Connecting { } #[allow(missing_docs)] -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources, std_sources)] #[non_exhaustive] pub enum AlpnError { - #[snafu(transparent)] - ConnectionError { source: ConnectionError }, - #[snafu(display("No ALPN available"))] - Unavailable {}, - #[snafu(display("Unknown handshake type"))] - UnknownHandshake {}, + #[error(transparent)] + ConnectionError { + #[error(std_err)] + source: ConnectionError, + }, + #[error("No ALPN available")] + Unavailable, + #[error("Unknown handshake type")] + UnknownHandshake, } impl Connecting { @@ -1626,9 +1604,9 @@ impl Connecting { match data.downcast::() { Ok(data) => match data.protocol { Some(protocol) => Ok(protocol), - None => Err(UnavailableSnafu.build()), + None => Err(e!(AlpnError::Unavailable)), }, - Err(_) => Err(UnknownHandshakeSnafu.build()), + Err(_) => Err(e!(AlpnError::UnknownHandshake)), } } } @@ -1694,11 +1672,9 @@ pub struct Connection { } #[allow(missing_docs)] -#[derive(Debug, Snafu)] -#[snafu(display("Protocol error: no remote id available"))] -pub struct RemoteEndpointIdError { - backtrace: Option, -} +#[stack_error(derive, add_meta)] +#[error("Protocol error: no remote id available")] +pub struct RemoteEndpointIdError; impl Connection { /// Initiates a new outgoing unidirectional stream. 
@@ -1920,7 +1896,7 @@ impl Connection { match data { None => { warn!("no peer certificate found"); - Err(RemoteEndpointIdSnafu.build()) + Err(RemoteEndpointIdError::new()) } Some(data) => match data.downcast::>() { Ok(certs) => { @@ -1929,19 +1905,19 @@ impl Connection { "expected a single peer certificate, but {} found", certs.len() ); - return Err(RemoteEndpointIdSnafu.build()); + return Err(RemoteEndpointIdError::new()); } let peer_id = EndpointId::from_verifying_key( VerifyingKey::from_public_key_der(&certs[0]) - .map_err(|_| RemoteEndpointIdSnafu.build())?, + .map_err(|_| RemoteEndpointIdError::new())?, ); Ok(peer_id) } Err(err) => { warn!("invalid peer certificate: {:?}", err); - Err(RemoteEndpointIdSnafu.build()) + Err(RemoteEndpointIdError::new()) } }, } @@ -2128,8 +2104,8 @@ mod tests { use std::time::{Duration, Instant}; use iroh_base::{EndpointAddr, EndpointId, SecretKey, TransportAddr}; + use n0_error::{AnyError as Error, Result, StdResultExt}; use n0_future::{BufferedStreamExt, StreamExt, stream, task::AbortOnDropHandle}; - use n0_snafu::{Error, Result, ResultExt}; use n0_watcher::Watcher; use quinn::ConnectionError; use rand::SeedableRng; @@ -2184,11 +2160,11 @@ mod tests { let server = tokio::spawn( async move { info!("accepting connection"); - let incoming = ep.accept().await.e()?; - let conn = incoming.await.e()?; - let mut stream = conn.accept_uni().await.e()?; + let incoming = ep.accept().await.anyerr()?; + let conn = incoming.await.anyerr()?; + let mut stream = conn.accept_uni().await.anyerr()?; let mut buf = [0u8; 5]; - stream.read_exact(&mut buf).await.e()?; + stream.read_exact(&mut buf).await.anyerr()?; info!("Accepted 1 stream, received {buf:?}. Closing now."); // close the connection conn.close(7u8.into(), b"bye"); @@ -2219,13 +2195,13 @@ mod tests { info!("client connecting"); let endpoint_addr = EndpointAddr::new(server_peer_id).with_relay_url(relay_url); let conn = ep.connect(endpoint_addr, TEST_ALPN).await?; - let mut stream = conn.open_uni().await.e()?; + let mut stream = conn.open_uni().await.anyerr()?; // First write is accepted by server. We need this bit of synchronisation // because if the server closes after simply accepting the connection we can // not be sure our .open_uni() call would succeed as it may already receive // the error. - stream.write_all(b"hello").await.e()?; + stream.write_all(b"hello").await.anyerr()?; info!("waiting for closed"); // Remote now closes the connection, we should see an error sometime soon. 
@@ -2251,9 +2227,9 @@ mod tests { n0_future::future::zip(server, client), ) .await - .e()?; - server.e()??; - client.e()??; + .anyerr()?; + server.anyerr()??; + client.anyerr()??; Ok(()) } @@ -2290,17 +2266,17 @@ mod tests { for i in 0..n_clients { let round_start = Instant::now(); info!("[server] round {i}"); - let incoming = ep.accept().await.e()?; - let conn = incoming.await.e()?; + let incoming = ep.accept().await.anyerr()?; + let conn = incoming.await.anyerr()?; let endpoint_id = conn.remote_id()?; info!(%i, peer = %endpoint_id.fmt_short(), "accepted connection"); - let (mut send, mut recv) = conn.accept_bi().await.e()?; + let (mut send, mut recv) = conn.accept_bi().await.anyerr()?; let mut buf = vec![0u8; chunk_size]; for _i in 0..n_chunks_per_client { - recv.read_exact(&mut buf).await.e()?; - send.write_all(&buf).await.e()?; + recv.read_exact(&mut buf).await.anyerr()?; + send.write_all(&buf).await.anyerr()?; } - send.finish().e()?; + send.finish().anyerr()?; conn.closed().await; // we're the last to send data, so we wait for the other side to close info!(%i, peer = %endpoint_id.fmt_short(), "finished"); info!("[server] round {i} done in {:?}", round_start.elapsed()); @@ -2330,14 +2306,14 @@ mod tests { let endpoint_addr = EndpointAddr::new(server_endpoint_id).with_relay_url(relay_url.clone()); info!(to = ?endpoint_addr, "client connecting"); - let conn = ep.connect(endpoint_addr, TEST_ALPN).await.e()?; + let conn = ep.connect(endpoint_addr, TEST_ALPN).await.anyerr()?; info!("client connected"); - let (mut send, mut recv) = conn.open_bi().await.e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; for i in 0..n_chunks_per_client { let mut buf = vec![i; chunk_size]; - send.write_all(&buf).await.e()?; - recv.read_exact(&mut buf).await.e()?; + send.write_all(&buf).await.anyerr()?; + recv.read_exact(&mut buf).await.anyerr()?; assert_eq!(buf, vec![i; chunk_size]); } // we're the last to receive data, so we close @@ -2352,7 +2328,7 @@ mod tests { info!("[client] round {i} done in {:?}", round_start.elapsed()); } - server.await.e()??; + server.await.anyerr()??; // We appear to have seen this being very slow at times. So ensure we fail if this // test is too slow. 
We're only making two connections transferring very little @@ -2382,13 +2358,13 @@ mod tests { let server = server.clone(); async move { let Some(conn) = server.accept().await else { - snafu::whatever!("Expected an incoming connection"); + n0_error::bail_any!("Expected an incoming connection"); }; - let conn = conn.await.e()?; - let (mut send, mut recv) = conn.accept_bi().await.e()?; - let data = recv.read_to_end(1000).await.e()?; - send.write_all(&data).await.e()?; - send.finish().e()?; + let conn = conn.await.anyerr()?; + let (mut send, mut recv) = conn.accept_bi().await.anyerr()?; + let data = recv.read_to_end(1000).await.anyerr()?; + send.write_all(&data).await.anyerr()?; + send.finish().anyerr()?; conn.closed().await; Ok::<_, Error>(()) @@ -2397,13 +2373,13 @@ mod tests { let addr = server.addr(); let conn = client.connect(addr, TEST_ALPN).await?; - let (mut send, mut recv) = conn.open_bi().await.e()?; - send.write_all(b"Hello, world!").await.e()?; - send.finish().e()?; - let data = recv.read_to_end(1000).await.e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; + send.write_all(b"Hello, world!").await.anyerr()?; + send.finish().anyerr()?; + let data = recv.read_to_end(1000).await.anyerr()?; conn.close(0u32.into(), b"bye!"); - task.await.e()??; + task.await.anyerr()??; client.close().await; server.close().await; @@ -2433,13 +2409,13 @@ mod tests { for i in 0..2 { println!("accept: round {i}"); let Some(conn) = server.accept().await else { - snafu::whatever!("Expected an incoming connection"); + n0_error::bail_any!("Expected an incoming connection"); }; - let conn = conn.await.e()?; - let (mut send, mut recv) = conn.accept_bi().await.e()?; - let data = recv.read_to_end(1000).await.e()?; - send.write_all(&data).await.e()?; - send.finish().e()?; + let conn = conn.await.anyerr()?; + let (mut send, mut recv) = conn.accept_bi().await.anyerr()?; + let data = recv.read_to_end(1000).await.anyerr()?; + send.write_all(&data).await.anyerr()?; + send.finish().anyerr()?; conn.closed().await; } Ok::<_, Error>(()) @@ -2456,10 +2432,10 @@ mod tests { .retain(|addr| !matches!(addr, TransportAddr::Ip(_))); let conn = client.connect(addr, TEST_ALPN).await?; - let (mut send, mut recv) = conn.open_bi().await.e()?; - send.write_all(b"Hello, world!").await.e()?; - send.finish().e()?; - let data = recv.read_to_end(1000).await.e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; + send.write_all(b"Hello, world!").await.anyerr()?; + send.finish().anyerr()?; + let data = recv.read_to_end(1000).await.anyerr()?; conn.close(0u32.into(), b"bye!"); assert_eq!(&data, b"Hello, world!"); @@ -2496,7 +2472,7 @@ mod tests { panic!("failed to change relay"); }) .await - .e()?; + .anyerr()?; println!("round2: {:?}", addr); assert_eq!(addr.relay_urls().next(), Some(&new_relay_url)); @@ -2506,13 +2482,13 @@ mod tests { .retain(|addr| !matches!(addr, TransportAddr::Ip(_))); let conn = client.connect(addr, TEST_ALPN).await?; - let (mut send, mut recv) = conn.open_bi().await.e()?; - send.write_all(b"Hello, world!").await.e()?; - send.finish().e()?; - let data = recv.read_to_end(1000).await.e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; + send.write_all(b"Hello, world!").await.anyerr()?; + send.finish().anyerr()?; + let data = recv.read_to_end(1000).await.anyerr()?; conn.close(0u32.into(), b"bye!"); - task.await.e()??; + task.await.anyerr()??; client.close().await; server.close().await; @@ -2548,32 +2524,32 @@ mod tests { async fn connect_hello(ep: Endpoint, dst: EndpointId) -> 
Result { let conn = ep.connect(dst, TEST_ALPN).await?; - let (mut send, mut recv) = conn.open_bi().await.e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; info!("sending hello"); - send.write_all(b"hello").await.e()?; - send.finish().e()?; + send.write_all(b"hello").await.anyerr()?; + send.finish().anyerr()?; info!("receiving world"); - let m = recv.read_to_end(100).await.e()?; + let m = recv.read_to_end(100).await.anyerr()?; assert_eq!(m, b"world"); conn.close(1u8.into(), b"done"); Ok(()) } async fn accept_world(ep: Endpoint, src: EndpointId) -> Result { - let incoming = ep.accept().await.e()?; - let mut iconn = incoming.accept().e()?; + let incoming = ep.accept().await.anyerr()?; + let mut iconn = incoming.accept().anyerr()?; let alpn = iconn.alpn().await?; - let conn = iconn.await.e()?; + let conn = iconn.await.anyerr()?; let endpoint_id = conn.remote_id()?; assert_eq!(endpoint_id, src); assert_eq!(alpn, TEST_ALPN); - let (mut send, mut recv) = conn.accept_bi().await.e()?; + let (mut send, mut recv) = conn.accept_bi().await.anyerr()?; info!("receiving hello"); - let m = recv.read_to_end(100).await.e()?; + let m = recv.read_to_end(100).await.anyerr()?; assert_eq!(m, b"hello"); info!("sending hello"); - send.write_all(b"world").await.e()?; - send.finish().e()?; + send.write_all(b"world").await.anyerr()?; + send.finish().anyerr()?; match conn.closed().await { ConnectionError::ApplicationClosed(closed) => { assert_eq!(closed.error_code, 1u8.into()); @@ -2612,10 +2588,10 @@ mod tests { ), )); - p1_accept.await.e()??; - p2_accept.await.e()??; - p1_connect.await.e()??; - p2_connect.await.e()??; + p1_accept.await.anyerr()??; + p2_accept.await.anyerr()??; + p1_connect.await.anyerr()??; + p2_connect.await.anyerr()??; Ok(()) } @@ -2654,12 +2630,12 @@ mod tests { return Ok(()); } } - snafu::whatever!("conn_type stream ended before `ConnectionType::Direct`"); + n0_error::bail_any!("conn_type stream ended before `ConnectionType::Direct`"); } async fn accept(ep: &Endpoint) -> Result { let incoming = ep.accept().await.expect("ep closed"); - let conn = incoming.await.e()?; + let conn = incoming.await.anyerr()?; let endpoint_id = conn.remote_id()?; tracing::info!(endpoint_id=%endpoint_id.fmt_short(), "accepted connection"); Ok(conn) @@ -2677,19 +2653,19 @@ mod tests { let ep1_side = tokio::time::timeout(TIMEOUT, async move { let conn = accept(&ep1).await?; - let mut send = conn.open_uni().await.e()?; + let mut send = conn.open_uni().await.anyerr()?; wait_for_conn_type_direct(&ep1, ep2_endpointid).await?; - send.write_all(b"Conn is direct").await.e()?; - send.finish().e()?; + send.write_all(b"Conn is direct").await.anyerr()?; + send.finish().anyerr()?; conn.closed().await; Ok::<(), Error>(()) }); let ep2_side = tokio::time::timeout(TIMEOUT, async move { let conn = ep2.connect(ep1_endpointaddr, TEST_ALPN).await?; - let mut recv = conn.accept_uni().await.e()?; + let mut recv = conn.accept_uni().await.anyerr()?; wait_for_conn_type_direct(&ep2, ep1_endpointid).await?; - let read = recv.read_to_end(100).await.e()?; + let read = recv.read_to_end(100).await.anyerr()?; assert_eq!(read, b"Conn is direct".to_vec()); conn.close(0u32.into(), b"done"); conn.closed().await; @@ -2699,9 +2675,9 @@ mod tests { let res_ep1 = AbortOnDropHandle::new(tokio::spawn(ep1_side)); let res_ep2 = AbortOnDropHandle::new(tokio::spawn(ep2_side)); - let (r1, r2) = tokio::try_join!(res_ep1, res_ep2).e()?; - r1.e()??; - r2.e()??; + let (r1, r2) = tokio::try_join!(res_ep1, res_ep2).anyerr()?; + r1.anyerr()??; + 
r2.anyerr()??; Ok(()) } @@ -2813,12 +2789,12 @@ mod tests { .into_0rtt() .expect_err("expected 0rtt to fail") .await - .e()?; + .anyerr()?; - let (mut send, mut recv) = conn.open_bi().await.e()?; - send.write_all(b"hello").await.e()?; - send.finish().e()?; - let received = recv.read_to_end(1_000).await.e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; + send.write_all(b"hello").await.anyerr()?; + send.finish().anyerr()?; + let received = recv.read_to_end(1_000).await.anyerr()?; assert_eq!(&received, b"hello"); conn.close(0u32.into(), b"thx"); Ok(()) @@ -2835,13 +2811,13 @@ mod tests { .await? .into_0rtt() .ok() - .e()?; + .anyerr()?; tracing::trace!("Client established 0-RTT connection"); // This is how we send data in 0-RTT: - let (mut send, recv) = conn.open_bi().await.e()?; - send.write_all(b"hello").await.e()?; - send.finish().e()?; + let (mut send, recv) = conn.open_bi().await.anyerr()?; + send.write_all(b"hello").await.anyerr()?; + send.finish().anyerr()?; tracing::trace!("Client sent 0-RTT data, waiting for server response"); // When this resolves, we've gotten a response from the server about whether the 0-RTT data above was accepted: let accepted = accepted_0rtt.await; @@ -2851,12 +2827,12 @@ mod tests { recv } else { // in this case we need to re-send data by re-creating the connection. - let (mut send, recv) = conn.open_bi().await.e()?; - send.write_all(b"hello").await.e()?; - send.finish().e()?; + let (mut send, recv) = conn.open_bi().await.anyerr()?; + send.write_all(b"hello").await.anyerr()?; + send.finish().anyerr()?; recv }; - let received = recv.read_to_end(1_000).await.e()?; + let received = recv.read_to_end(1_000).await.anyerr()?; assert_eq!(&received, b"hello"); conn.close(0u32.into(), b"thx"); Ok(()) @@ -2943,25 +2919,25 @@ mod tests { .await?; let server_addr = server.addr(); let server_task = tokio::spawn(async move { - let incoming = server.accept().await.e()?; - let conn = incoming.await.e()?; - let (mut send, mut recv) = conn.accept_bi().await.e()?; - let msg = recv.read_to_end(1_000).await.e()?; - send.write_all(&msg).await.e()?; - send.finish().e()?; + let incoming = server.accept().await.anyerr()?; + let conn = incoming.await.anyerr()?; + let (mut send, mut recv) = conn.accept_bi().await.anyerr()?; + let msg = recv.read_to_end(1_000).await.anyerr()?; + send.write_all(&msg).await.anyerr()?; + send.finish().anyerr()?; let close_reason = conn.closed().await; Ok::<_, Error>(close_reason) }); let conn = client.connect(server_addr, TEST_ALPN).await?; - let (mut send, mut recv) = conn.open_bi().await.e()?; - send.write_all(b"Hello, world!").await.e()?; - send.finish().e()?; - recv.read_to_end(1_000).await.e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; + send.write_all(b"Hello, world!").await.anyerr()?; + send.finish().anyerr()?; + recv.read_to_end(1_000).await.anyerr()?; conn.close(42u32.into(), b"thanks, bye!"); client.close().await; - let close_err = server_task.await.e()??; + let close_err = server_task.await.anyerr()??; let ConnectionError::ApplicationClosed(app_close) = close_err else { panic!("Unexpected close reason: {close_err:?}"); }; @@ -2991,19 +2967,19 @@ mod tests { .await?; let server_addr = server.addr(); let server_task = tokio::task::spawn(async move { - let conn = server.accept().await.e()?.accept().e()?.await.e()?; - let mut uni = conn.accept_uni().await.e()?; - uni.read_to_end(10).await.e()?; + let conn = server.accept().await.anyerr()?.await.anyerr()?; + let mut uni = conn.accept_uni().await.anyerr()?; + 
uni.read_to_end(10).await.anyerr()?; drop(conn); Ok::<_, Error>(server) }); let conn = client.connect(server_addr, TEST_ALPN).await?; - let mut uni = conn.open_uni().await.e()?; - uni.write_all(b"helloworld").await.e()?; - uni.finish().e()?; + let mut uni = conn.open_uni().await.anyerr()?; + uni.write_all(b"helloworld").await.anyerr()?; + uni.finish().anyerr()?; conn.closed().await; drop(conn); - let server = server_task.await.e()??; + let server = server_task.await.anyerr()??; let m = client.metrics(); assert_eq!(m.magicsock.num_direct_conns_added.get(), 1); @@ -3028,7 +3004,7 @@ mod tests { let mut registry = Registry::default(); register_endpoint(&mut registry, &client); register_endpoint(&mut registry, &server); - let s = registry.encode_openmetrics_to_string()?; + let s = registry.encode_openmetrics_to_string().anyerr()?; assert!(s.contains(r#"magicsock_endpoints_contacted_directly_total{id="3b6a27bcce"} 1"#)); assert!(s.contains(r#"magicsock_endpoints_contacted_directly_total{id="8a88e3dd74"} 1"#)); Ok(()) @@ -3050,10 +3026,10 @@ mod tests { let server_task = tokio::spawn({ let server = server.clone(); async move { - let incoming = server.accept().await.e()?; - let conn = incoming.await.e()?; + let incoming = server.accept().await.anyerr()?; + let conn = incoming.await.anyerr()?; conn.close(0u32.into(), b"bye!"); - Ok::<_, n0_snafu::Error>(conn.alpn()) + n0_error::Ok(conn.alpn()) } }); @@ -3064,13 +3040,13 @@ mod tests { ConnectOptions::new().with_additional_alpns(secondary_connect_alpns), ) .await?; - let conn = conn.await.e()?; + let conn = conn.await.anyerr()?; let client_alpn = conn.alpn(); conn.closed().await; client.close().await; server.close().await; - let server_alpn = server_task.await.e()??; + let server_alpn = server_task.await.anyerr()??; assert_eq!(client_alpn, server_alpn); @@ -3134,7 +3110,7 @@ mod tests { let endpoint = Endpoint::empty_builder(RelayMode::Staging).bind().await?; // can get a first report - endpoint.net_report().updated().await?; + endpoint.net_report().updated().await.anyerr()?; Ok(()) } @@ -3165,7 +3141,7 @@ mod tests { let endpoint = Endpoint::empty_builder(RelayMode::Disabled) .bind() .await - .e()?; + .anyerr()?; let addr = endpoint.addr(); let router = Router::builder(endpoint).accept(NOOP_ALPN, Noop).spawn(); Ok((router, addr)) @@ -3178,7 +3154,7 @@ mod tests { .await .into_iter() .collect::, _>>() - .e()?; + .anyerr()?; let addrs = routers .iter() @@ -3190,7 +3166,7 @@ mod tests { .discovery(discovery) .bind() .await - .e()?; + .anyerr()?; // wait for the endpoint to be initialized. This should not be needed, // but we don't want to measure endpoint init time but connection time // from a fully initialized endpoint. diff --git a/iroh/src/endpoint/presets.rs b/iroh/src/endpoint/presets.rs index 412af114947..8732c074f73 100644 --- a/iroh/src/endpoint/presets.rs +++ b/iroh/src/endpoint/presets.rs @@ -3,7 +3,7 @@ //! # Example //! //! ```no_run -//! # async fn wrapper() -> n0_snafu::Result { +//! # async fn wrapper() -> n0_error::Result { //! use iroh::{Endpoint, RelayMode, Watcher, endpoint::presets}; //! //! 
let endpoint = Endpoint::empty_builder(RelayMode::Disabled) diff --git a/iroh/src/key.rs b/iroh/src/key.rs index 64f8d8bb051..c6b9f6ee483 100644 --- a/iroh/src/key.rs +++ b/iroh/src/key.rs @@ -4,8 +4,7 @@ use std::fmt::Debug; use aead::{AeadCore, AeadInOut, Buffer}; use iroh_base::{PublicKey, SecretKey}; -use nested_enum_utils::common_fields; -use snafu::{ResultExt, Snafu, ensure}; +use n0_error::{e, ensure, stack_error}; pub(crate) const NONCE_LEN: usize = 24; @@ -25,20 +24,18 @@ pub(super) fn secret_ed_box(key: &SecretKey) -> crypto_box::SecretKey { pub struct SharedSecret(crypto_box::ChaChaBox); /// Errors that can occur during [`SharedSecret::open`]. -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources, std_sources)] #[non_exhaustive] pub enum DecryptionError { /// The nonce had the wrong size. - #[snafu(display("Invalid nonce"))] - InvalidNonce {}, + #[error("Invalid nonce")] + InvalidNonce, /// AEAD decryption failed. - #[snafu(display("Aead error"))] - Aead { source: aead::Error }, + #[error("Aead error")] + Aead { + #[error(std_err)] + source: aead::Error, + }, } impl Debug for SharedSecret { @@ -66,17 +63,15 @@ impl SharedSecret { /// Opens the ciphertext, which must have been created using `Self::seal`, and places the clear text into the provided buffer. pub fn open(&self, buffer: &mut dyn Buffer) -> Result<(), DecryptionError> { - ensure!(buffer.len() >= NONCE_LEN, InvalidNonceSnafu); + ensure!(buffer.len() >= NONCE_LEN, DecryptionError::InvalidNonce); let offset = buffer.len() - NONCE_LEN; let nonce: [u8; NONCE_LEN] = buffer.as_ref()[offset..] .try_into() - .map_err(|_| InvalidNonceSnafu.build())?; + .map_err(|_| e!(DecryptionError::InvalidNonce))?; buffer.truncate(offset); - self.0 - .decrypt_in_place(&nonce.into(), AEAD_DATA, buffer) - .context(AeadSnafu)?; + self.0.decrypt_in_place(&nonce.into(), AEAD_DATA, buffer)?; Ok(()) } diff --git a/iroh/src/lib.rs b/iroh/src/lib.rs index eb109682d1b..540d8235d71 100644 --- a/iroh/src/lib.rs +++ b/iroh/src/lib.rs @@ -9,16 +9,16 @@ //! //! ```no_run //! # use iroh::{Endpoint, EndpointAddr}; -//! # use n0_snafu::ResultExt; -//! # async fn wrapper() -> n0_snafu::Result { +//! # use n0_error::{StackResultExt, StdResultExt}; +//! # async fn wrapper() -> n0_error::Result<()> { //! let addr: EndpointAddr = todo!(); //! let ep = Endpoint::bind().await?; //! let conn = ep.connect(addr, b"my-alpn").await?; -//! let mut send_stream = conn.open_uni().await.context("unable to open uni")?; +//! let mut send_stream = conn.open_uni().await.std_context("unable to open uni")?; //! send_stream //! .write_all(b"msg") //! .await -//! .context("unable to write all")?; +//! .std_context("unable to write all")?; //! # Ok(()) //! # } //! ``` @@ -27,8 +27,8 @@ //! //! ```no_run //! # use iroh::{Endpoint, EndpointAddr}; -//! # use n0_snafu::ResultExt; -//! # async fn wrapper() -> n0_snafu::Result { +//! # use n0_error::{StackResultExt, StdResultExt}; +//! # async fn wrapper() -> n0_error::Result<()> { //! let ep = Endpoint::builder() //! .alpns(vec![b"my-alpn".to_vec()]) //! .bind() @@ -38,13 +38,13 @@ //! .await //! .context("accept error")? //! .await -//! .context("connecting error")?; -//! let mut recv_stream = conn.accept_uni().await.context("unable to open uni")?; +//! .std_context("connecting error")?; +//! let mut recv_stream = conn.accept_uni().await.std_context("unable to open uni")?; //! let mut buf = [0u8; 3]; //! recv_stream //! 
.read_exact(&mut buf) //! .await -//! .context("unable to read")?; +//! .std_context("unable to read")?; //! # Ok(()) //! # } //! ``` @@ -171,7 +171,7 @@ //! //! ```no_run //! use iroh::{Endpoint, EndpointAddr}; -//! use n0_snafu::{Result, ResultExt}; +//! use n0_error::{Result, StackResultExt, StdResultExt}; //! //! async fn connect(addr: EndpointAddr) -> Result<()> { //! // The Endpoint is the central object that manages an iroh node. @@ -179,10 +179,10 @@ //! //! // Establish a QUIC connection, open a bi-directional stream, exchange messages. //! let conn = ep.connect(addr, b"hello-world").await?; -//! let (mut send_stream, mut recv_stream) = conn.open_bi().await.context("open bi")?; -//! send_stream.write_all(b"hello").await.context("write")?; -//! send_stream.finish().context("finish")?; -//! let _msg = recv_stream.read_to_end(10).await.context("read")?; +//! let (mut send_stream, mut recv_stream) = conn.open_bi().await.std_context("open bi")?; +//! send_stream.write_all(b"hello").await.std_context("write")?; +//! send_stream.finish().std_context("finish")?; +//! let _msg = recv_stream.read_to_end(10).await.std_context("read")?; //! //! // Gracefully close the connection and endpoint. //! conn.close(1u8.into(), b"done"); @@ -196,8 +196,8 @@ //! //! ```no_run //! use iroh::{Endpoint, EndpointAddr}; +//! use n0_error::{Result, StackResultExt, StdResultExt}; //! use n0_future::StreamExt; -//! use n0_snafu::{Result, ResultExt}; //! //! async fn accept() -> Result<()> { //! // To accept connections at least one ALPN must be configured. @@ -212,11 +212,12 @@ //! .await //! .context("no incoming connection")? //! .await -//! .context("accept conn")?; -//! let (mut send_stream, mut recv_stream) = conn.accept_bi().await.context("accept stream")?; -//! let _msg = recv_stream.read_to_end(10).await.context("read")?; -//! send_stream.write_all(b"world").await.context("write")?; -//! send_stream.finish().context("finish")?; +//! .std_context("accept conn")?; +//! let (mut send_stream, mut recv_stream) = +//! conn.accept_bi().await.std_context("accept stream")?; +//! let _msg = recv_stream.read_to_end(10).await.std_context("read")?; +//! send_stream.write_all(b"world").await.std_context("write")?; +//! send_stream.finish().std_context("finish")?; //! //! // Wait for the client to close the connection and gracefully close the endpoint. //! 
conn.closed().await; diff --git a/iroh/src/magicsock.rs b/iroh/src/magicsock.rs index a05c2a1a96f..fb3f3d22a30 100644 --- a/iroh/src/magicsock.rs +++ b/iroh/src/magicsock.rs @@ -32,19 +32,18 @@ use bytes::Bytes; use data_encoding::HEXLOWER; use iroh_base::{EndpointAddr, EndpointId, PublicKey, RelayUrl, SecretKey, TransportAddr}; use iroh_relay::{RelayConfig, RelayMap}; +use n0_error::{e, stack_error}; use n0_future::{ task::{self, AbortOnDropHandle}, time::{self, Duration, Instant}, }; use n0_watcher::{self, Watchable, Watcher}; -use nested_enum_utils::common_fields; use netwatch::netmon; #[cfg(not(wasm_browser))] use netwatch::{UdpSocket, ip::LocalAddresses}; use quinn::{AsyncUdpSocket, ServerConfig}; use rand::Rng; use smallvec::SmallVec; -use snafu::{ResultExt, Snafu}; use tokio::sync::{Mutex as AsyncMutex, mpsc}; use tokio_util::sync::CancellationToken; use tracing::{ @@ -214,21 +213,15 @@ pub(crate) struct MagicSock { } #[allow(missing_docs)] -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(crate)))] +#[stack_error(derive, add_meta)] #[non_exhaustive] pub enum AddEndpointAddrError { - #[snafu(display("Empty addressing info"))] - Empty {}, - #[snafu(display("Empty addressing info, {pruned} direct address have been pruned"))] + #[error("Empty addressing info")] + Empty, + #[error("Empty addressing info, {pruned} direct address have been pruned")] EmptyPruned { pruned: usize }, - #[snafu(display("Adding our own address is not supported"))] - OwnAddress {}, + #[error("Adding our own address is not supported")] + OwnAddress, } impl MagicSock { @@ -417,9 +410,9 @@ impl MagicSock { .add_endpoint_addr(addr, source, have_ipv6, &self.metrics.magicsock); Ok(()) } else if pruned != 0 { - Err(EmptyPrunedSnafu { pruned }.build()) + Err(e!(AddEndpointAddrError::EmptyPruned { pruned })) } else { - Err(EmptySnafu.build()) + Err(e!(AddEndpointAddrError::Empty)) } } @@ -1338,23 +1331,18 @@ impl DirectAddrUpdateState { } #[allow(missing_docs)] -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] #[non_exhaustive] pub enum CreateHandleError { - #[snafu(display("Failed to create bind sockets"))] + #[error("Failed to create bind sockets")] BindSockets { source: io::Error }, - #[snafu(display("Failed to create internal quinn endpoint"))] + #[error("Failed to create internal quinn endpoint")] CreateQuinnEndpoint { source: io::Error }, - #[snafu(display("Failed to create socket state"))] + #[error("Failed to create socket state")] CreateSocketState { source: io::Error }, - #[snafu(display("Failed to create netmon monitor"))] + #[error("Failed to create netmon monitor")] CreateNetmonMonitor { source: netmon::Error }, - #[snafu(display("Failed to subscribe netmon monitor"))] + #[error("Failed to subscribe netmon monitor")] SubscribeNetmonMonitor { source: netmon::Error }, } @@ -1383,8 +1371,8 @@ impl Handle { let addr_v4 = addr_v4.unwrap_or_else(|| SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0)); #[cfg(not(wasm_browser))] - let (ip_transports, port_mapper) = - bind_ip(addr_v4, addr_v6, &metrics).context(BindSocketsSnafu)?; + let (ip_transports, port_mapper) = bind_ip(addr_v4, addr_v6, &metrics) + .map_err(|err| e!(CreateHandleError::BindSockets, err))?; let ip_mapped_addrs = IpMappedAddresses::default(); @@ -1474,11 +1462,11 @@ impl Handle { #[cfg(wasm_browser)] Arc::new(crate::web_runtime::WebRuntime), ) - 
.context(CreateQuinnEndpointSnafu)?; + .map_err(|err| e!(CreateHandleError::CreateQuinnEndpoint, err))?; let network_monitor = netmon::Monitor::new() .await - .context(CreateNetmonMonitorSnafu)?; + .map_err(|err| e!(CreateHandleError::CreateNetmonMonitor, err))?; let qad_endpoint = endpoint.clone(); @@ -1687,8 +1675,9 @@ impl DiscoState { ) -> Result { let mut sealed_box = sealed_box.to_vec(); self.get_secret(endpoint_id, |secret| secret.open(&mut sealed_box)) - .context(OpenSnafu)?; - disco::Message::from_bytes(&sealed_box).context(ParseSnafu) + .map_err(|source| e!(DiscoBoxError::Open { source }))?; + disco::Message::from_bytes(&sealed_box) + .map_err(|source| e!(DiscoBoxError::Parse { source })) } fn get_secret(&self, endpoint_id: PublicKey, cb: F) -> T @@ -1705,24 +1694,13 @@ impl DiscoState { } #[allow(missing_docs)] -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] #[non_exhaustive] enum DiscoBoxError { - #[snafu(display("Failed to open crypto box"))] - Open { - #[snafu(source(from(DecryptionError, Box::new)))] - source: Box, - }, - #[snafu(display("Failed to parse disco message"))] - Parse { - #[snafu(source(from(disco::ParseError, Box::new)))] - source: Box, - }, + #[error("Failed to open crypto box")] + Open { source: DecryptionError }, + #[error("Failed to parse disco message")] + Parse { source: disco::ParseError }, } #[derive(Debug)] @@ -2403,8 +2381,8 @@ impl DiscoveredDirectAddrs { pub(crate) struct EndpointIdMappedAddr(Ipv6Addr); /// Can occur when converting a [`SocketAddr`] to an [`EndpointIdMappedAddr`] -#[derive(Debug, Snafu)] -#[snafu(display("Failed to convert"))] +#[stack_error(derive, add_meta)] +#[error("Failed to convert")] pub struct EndpointIdMappedAddrError; /// Counter to always generate unique addresses for [`EndpointIdMappedAddr`]. 
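For reference, a minimal sketch of the error pattern the hunks above converge on: an enum annotated with `#[stack_error(derive, add_meta)]`, `#[error("...")]` messages per variant, and construction through the `e!` macro instead of snafu's generated `...Snafu` selectors. The attribute and macro are used here exactly as they appear in this diff; their precise semantics live in the `n0_error` crate, and the enum and functions below are hypothetical illustrations, not code from this repository.

    use n0_error::{e, stack_error};

    // A unit variant and a variant carrying a plain std-error source,
    // mirroring `AddEndpointAddrError::Empty` and
    // `CreateHandleError::BindSockets` in the hunks above.
    #[stack_error(derive, add_meta)]
    #[non_exhaustive]
    pub enum ExampleError {
        #[error("Nothing to do")]
        Empty,
        #[error("Failed to bind socket")]
        Bind { source: std::io::Error },
    }

    fn bind_any() -> Result<std::net::UdpSocket, ExampleError> {
        // Where the old code used `.context(BindSocketsSnafu)`, the new code
        // wraps the source via `e!(Variant, err)`, as in the hunks above.
        std::net::UdpSocket::bind("127.0.0.1:0")
            .map_err(|err| e!(ExampleError::Bind, err))
    }

    fn require_nonempty(len: usize) -> Result<(), ExampleError> {
        // Unit variants are built with `e!(Variant)` where snafu used
        // `EmptySnafu.build()`.
        if len == 0 {
            return Err(e!(ExampleError::Empty));
        }
        Ok(())
    }
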
@@ -2459,7 +2437,7 @@ impl TryFrom for EndpointIdMappedAddr { { return Ok(Self(value)); } - Err(EndpointIdMappedAddrError) + Err(e!(EndpointIdMappedAddrError)) } } @@ -2543,8 +2521,8 @@ mod tests { use data_encoding::HEXLOWER; use iroh_base::{EndpointAddr, EndpointId, PublicKey, TransportAddr}; + use n0_error::{Result, StdResultExt}; use n0_future::{StreamExt, time}; - use n0_snafu::{Result, ResultExt}; use n0_watcher::Watcher; use quinn::ServerConfig; use rand::{CryptoRng, Rng, RngCore, SeedableRng}; @@ -2716,7 +2694,7 @@ mod tests { } }) .await - .context("timeout")?; + .std_context("timeout")?; info!("all endpoints meshed"); Ok(tasks) } @@ -2727,24 +2705,24 @@ mod tests { let conn = ep.endpoint.accept().await.expect("no conn"); info!("connecting"); - let conn = conn.await.context("connecting")?; + let conn = conn.await.std_context("connecting")?; info!("accepting bi"); - let (mut send_bi, mut recv_bi) = conn.accept_bi().await.context("accept bi")?; + let (mut send_bi, mut recv_bi) = conn.accept_bi().await.std_context("accept bi")?; info!("reading"); let val = recv_bi .read_to_end(usize::MAX) .await - .context("read to end")?; + .std_context("read to end")?; info!("replying"); for chunk in val.chunks(12) { - send_bi.write_all(chunk).await.context("write all")?; + send_bi.write_all(chunk).await.std_context("write all")?; } info!("finishing"); - send_bi.finish().context("finish")?; - send_bi.stopped().await.context("stopped")?; + send_bi.finish().std_context("finish")?; + send_bi.stopped().await.std_context("stopped")?; let stats = conn.stats(); info!("stats: {:#?}", stats); @@ -2776,20 +2754,20 @@ mod tests { let conn = ep.endpoint.connect(dest, ALPN).await?; info!("opening bi"); - let (mut send_bi, mut recv_bi) = conn.open_bi().await.context("open bi")?; + let (mut send_bi, mut recv_bi) = conn.open_bi().await.std_context("open bi")?; info!("writing message"); - send_bi.write_all(msg).await.context("write all")?; + send_bi.write_all(msg).await.std_context("write all")?; info!("finishing"); - send_bi.finish().context("finish")?; - send_bi.stopped().await.context("stopped")?; + send_bi.finish().std_context("finish")?; + send_bi.stopped().await.std_context("stopped")?; info!("reading_to_end"); let val = recv_bi .read_to_end(usize::MAX) .await - .context("read to end")?; + .std_context("read to end")?; assert_eq!( val, msg, @@ -2898,7 +2876,7 @@ mod tests { #[tokio::test] #[traced_test] - async fn test_regression_network_change_rebind_wakes_connection_driver() -> n0_snafu::Result { + async fn test_regression_network_change_rebind_wakes_connection_driver() -> Result { let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(0u64); let m1 = MagicStack::new(&mut rng, RelayMode::Disabled).await; let m2 = MagicStack::new(&mut rng, RelayMode::Disabled).await; @@ -2914,11 +2892,11 @@ mod tests { async move { while let Some(incoming) = endpoint.accept().await { println!("Incoming first conn!"); - let conn = incoming.await.e()?; + let conn = incoming.await.anyerr()?; conn.closed().await; } - Ok::<_, n0_snafu::Error>(()) + n0_error::Ok(()) } })); @@ -2940,7 +2918,7 @@ mod tests { test_two_devices_roundtrip_network_change_impl(), ) .await - .context("timeout")? + .std_context("timeout")? 
} /// Same structure as `test_two_devices_roundtrip_quinn_magic`, but interrupts regularly @@ -3166,8 +3144,8 @@ mod tests { mapped_addr.private_socket_addr(), &tls::name::encode(endpoint_id), ) - .context("connect")?; - let connection = connect.await.e()?; + .std_context("connect")?; + let connection = connect.await.anyerr()?; Ok(connection) } @@ -3210,12 +3188,12 @@ mod tests { // This needs an accept task let accept_task = tokio::spawn({ async fn accept(ep: quinn::Endpoint) -> Result<()> { - let incoming = ep.accept().await.context("no incoming")?; + let incoming = ep.accept().await.std_context("no incoming")?; let _conn = incoming .accept() - .context("accept")? + .std_context("accept")? .await - .context("connecting")?; + .std_context("connecting")?; // Keep this connection alive for a while tokio::time::sleep(Duration::from_secs(10)).await; @@ -3287,14 +3265,17 @@ mod tests { // We need a task to accept the connection. let accept_task = tokio::spawn({ async fn accept(ep: quinn::Endpoint) -> Result<()> { - let incoming = ep.accept().await.context("no incoming")?; + let incoming = ep.accept().await.std_context("no incoming")?; let conn = incoming .accept() - .context("accept")? + .std_context("accept")? + .await + .std_context("connecting")?; + let mut stream = conn.accept_uni().await.std_context("accept uni")?; + stream + .read_to_end(1 << 16) .await - .context("connecting")?; - let mut stream = conn.accept_uni().await.context("accept uni")?; - stream.read_to_end(1 << 16).await.context("read to end")?; + .std_context("read to end")?; info!("accept finished"); Ok(()) } diff --git a/iroh/src/magicsock/transports/relay/actor.rs b/iroh/src/magicsock/transports/relay/actor.rs index 8ad69779c9a..5b9d8b226bf 100644 --- a/iroh/src/magicsock/transports/relay/actor.rs +++ b/iroh/src/magicsock/transports/relay/actor.rs @@ -44,15 +44,14 @@ use iroh_relay::{ client::{Client, ConnectError, RecvError, SendError}, protos::relay::{ClientToRelayMsg, Datagrams, RelayToClientMsg}, }; +use n0_error::{e, stack_error}; use n0_future::{ FuturesUnorderedBounded, SinkExt, StreamExt, task::JoinSet, time::{self, Duration, Instant, MissedTickBehavior}, }; use n0_watcher::Watchable; -use nested_enum_utils::common_fields; use netwatch::interfaces; -use snafu::{IntoError, ResultExt, Snafu}; use tokio::sync::{mpsc, oneshot}; use tokio_util::sync::CancellationToken; use tracing::{Instrument, Level, debug, error, event, info, info_span, instrument, trace, warn}; @@ -211,60 +210,48 @@ struct RelayConnectionOptions { /// Possible reasons for a failed relay connection. 
#[allow(missing_docs)] -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] enum RelayConnectionError { - #[snafu(display("Failed to connect to relay server"))] + #[error("Failed to connect to relay server")] Dial { source: DialError }, - #[snafu(display("Failed to handshake with relay server"))] + #[error("Failed to handshake with relay server")] Handshake { source: RunError }, - #[snafu(display("Lost connection to relay server"))] + #[error("Lost connection to relay server")] Established { source: RunError }, } #[allow(missing_docs)] -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] enum RunError { - #[snafu(display("Send timeout"))] - SendTimeout {}, - #[snafu(display("Ping timeout"))] - PingTimeout {}, - #[snafu(display("Local IP no longer valid"))] - LocalIpInvalid {}, - #[snafu(display("No local address"))] - LocalAddrMissing {}, - #[snafu(display("Stream closed by server."))] - StreamClosedServer {}, - #[snafu(display("Client stream read failed"))] - ClientStreamRead { source: RecvError }, - #[snafu(display("Client stream write failed"))] - ClientStreamWrite { source: SendError }, + #[error("Send timeout")] + SendTimeout, + #[error("Ping timeout")] + PingTimeout, + #[error("Local IP no longer valid")] + LocalIpInvalid, + #[error("No local address")] + LocalAddrMissing, + #[error("Stream closed by server.")] + StreamClosedServer, + #[error("Client stream read failed")] + ClientStreamRead { + #[error(std_err)] + source: RecvError, + }, + #[error("Client stream write failed")] + ClientStreamWrite { + #[error(std_err)] + source: SendError, + }, } #[allow(missing_docs)] -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] enum DialError { - #[snafu(display("timeout (>{timeout:?}) trying to establish a connection"))] + #[error("timeout (>{timeout:?}) trying to establish a connection")] Timeout { timeout: Duration }, - #[snafu(display("unable to connect"))] - Connect { - #[snafu(source(from(ConnectError, Box::new)))] - source: Box, - }, + #[error("unable to connect")] + Connect { source: ConnectError }, } impl ActiveRelayActor { @@ -375,7 +362,7 @@ impl ActiveRelayActor { /// be retried with a backoff. 
async fn run_once(&mut self) -> Result<(), RelayConnectionError> { let client = match self.run_dialing().instrument(info_span!("dialing")).await { - Some(client_res) => client_res.context(DialSnafu)?, + Some(client_res) => client_res.map_err(|err| e!(RelayConnectionError::Dial, err))?, None => return Ok(()), }; self.run_connected(client) @@ -497,11 +484,10 @@ impl ActiveRelayActor { async move { match time::timeout(CONNECT_TIMEOUT, client_builder.connect()).await { Ok(Ok(client)) => Ok(client), - Ok(Err(err)) => Err(ConnectSnafu.into_error(err)), - Err(_) => Err(TimeoutSnafu { - timeout: CONNECT_TIMEOUT, - } - .build()), + Ok(Err(err)) => Err(e!(DialError::Connect, err)), + Err(_) => Err(e!(DialError::Timeout { + timeout: CONNECT_TIMEOUT + })), } } } @@ -523,7 +509,7 @@ impl ActiveRelayActor { ); let (mut client_stream, client_sink) = client.split(); - let mut client_sink = client_sink.sink_map_err(|e| ClientStreamWriteSnafu.into_error(e)); + let mut client_sink = client_sink.sink_map_err(|e| e!(RunError::ClientStreamWrite, e)); let mut state = ConnectedRelayState { ping_tracker: PingTracker::default(), @@ -568,7 +554,7 @@ impl ActiveRelayActor { } } _ = state.ping_tracker.timeout() => { - break Err(PingTimeoutSnafu.build()); + break Err(e!(RunError::PingTimeout)); } _ = ping_interval.tick() => { let data = state.ping_tracker.new_ping(); @@ -591,8 +577,8 @@ impl ActiveRelayActor { let fut = client_sink.send(ClientToRelayMsg::Ping(data)); self.run_sending(fut, &mut state, &mut client_stream).await?; } - Some(_) => break Err(LocalIpInvalidSnafu.build()), - None => break Err(LocalAddrMissingSnafu.build()), + Some(_) => break Err(e!(RunError::LocalIpInvalid)), + None => break Err(e!(RunError::LocalAddrMissing)), } } #[cfg(test)] @@ -633,7 +619,7 @@ impl ActiveRelayActor { } msg = client_stream.next() => { let Some(msg) = msg else { - break Err(StreamClosedServerSnafu.build()); + break Err(e!(RunError::StreamClosedServer)); }; match msg { Ok(msg) => { @@ -641,7 +627,7 @@ impl ActiveRelayActor { // reset the ping timer, we have just received a message ping_interval.reset(); }, - Err(err) => break Err(ClientStreamReadSnafu.into_error(err)), + Err(err) => break Err(e!(RunError::ClientStreamRead, err)), } } _ = &mut self.inactive_timeout, if !self.is_home_relay => { @@ -743,7 +729,7 @@ impl ActiveRelayActor { break Ok(()); } _ = &mut timeout => { - break Err(SendTimeoutSnafu.build()); + break Err(e!(RunError::SendTimeout)); } msg = self.prio_inbox.recv() => { let Some(msg) = msg else { @@ -764,16 +750,16 @@ impl ActiveRelayActor { } } _ = state.ping_tracker.timeout() => { - break Err(PingTimeoutSnafu.build()); + break Err(e!(RunError::PingTimeout)); } // No need to read the inbox or datagrams to send. 
msg = client_stream.next() => { let Some(msg) = msg else { - break Err(StreamClosedServerSnafu.build()); + break Err(e!(RunError::StreamClosedServer)); }; match msg { Ok(msg) => self.handle_relay_msg(msg, state), - Err(err) => break Err(ClientStreamReadSnafu.into_error(err)), + Err(err) => break Err(e!(RunError::ClientStreamRead, err)), } } _ = &mut self.inactive_timeout, if !self.is_home_relay => { @@ -814,9 +800,9 @@ struct ConnectedRelayState { impl ConnectedRelayState { fn map_err(&self, error: RunError) -> RelayConnectionError { if self.established { - EstablishedSnafu.into_error(error) + e!(RelayConnectionError::Established, error) } else { - HandshakeSnafu.into_error(error) + e!(RelayConnectionError::Handshake, error) } } } @@ -1238,7 +1224,7 @@ mod tests { use iroh_base::{EndpointId, RelayUrl, SecretKey}; use iroh_relay::{PingTracker, protos::relay::Datagrams}; - use n0_snafu::{Error, Result, ResultExt}; + use n0_error::{AnyError as Error, Result, StackResultExt, StdResultExt}; use tokio::sync::{mpsc, oneshot}; use tokio_util::{sync::CancellationToken, task::AbortOnDropHandle}; use tracing::{Instrument, info, info_span}; @@ -1357,7 +1343,7 @@ mod tests { tokio::time::timeout(Duration::from_secs(10), async move { loop { let res = tokio::time::timeout(UNDELIVERABLE_DATAGRAM_TIMEOUT, async { - tx.send(item.clone()).await.context("send item")?; + tx.send(item.clone()).await.std_context("send item")?; let RelayRecvDatagram { url: _, src: _, @@ -1421,17 +1407,17 @@ mod tests { inbox_tx .send(ActiveRelayMessage::GetLocalAddr(tx)) .await - .context("send get local addr msg")?; + .std_context("send get local addr msg")?; let local_addr = rx .await - .context("wait for local addr msg")? + .std_context("wait for local addr msg")? .context("no local addr")?; info!(?local_addr, "check connection with addr"); inbox_tx .send(ActiveRelayMessage::CheckConnection(vec![local_addr.ip()])) .await - .context("send check connection message")?; + .std_context("send check connection message")?; // Sync the ActiveRelayActor. Ping blocks it and we want to be sure it has handled // another inbox message before continuing. @@ -1439,8 +1425,8 @@ mod tests { inbox_tx .send(ActiveRelayMessage::GetLocalAddr(tx)) .await - .context("send get local addr msg")?; - rx.await.context("recv send local addr msg")?; + .std_context("send get local addr msg")?; + rx.await.std_context("recv send local addr msg")?; // Echo should still work. info!("second echo"); @@ -1457,7 +1443,7 @@ mod tests { inbox_tx .send(ActiveRelayMessage::CheckConnection(Vec::new())) .await - .context("send check connection msg")?; + .std_context("send check connection msg")?; // Give some time to reconnect, mostly to sort logs rather than functional. tokio::time::sleep(Duration::from_millis(10)).await; @@ -1473,7 +1459,7 @@ mod tests { // Shut down the actor. 
cancel_token.cancel(); - task.await.context("wait for task to finish")?; + task.await.std_context("wait for task to finish")?; Ok(()) } @@ -1515,7 +1501,7 @@ mod tests { } }) .await - .context("timeout")?; + .std_context("timeout")?; // From now on, we pause time tokio::time::pause(); diff --git a/iroh/src/net_report.rs b/iroh/src/net_report.rs index 0ba629544b2..24484bcaeaf 100644 --- a/iroh/src/net_report.rs +++ b/iroh/src/net_report.rs @@ -31,6 +31,9 @@ use iroh_relay::{ RelayMap, quic::{QUIC_ADDR_DISC_CLOSE_CODE, QUIC_ADDR_DISC_CLOSE_REASON}, }; +use n0_error::e; +#[cfg(not(wasm_browser))] +use n0_error::stack_error; #[cfg(not(wasm_browser))] use n0_future::task; use n0_future::{ @@ -78,6 +81,23 @@ pub(crate) mod portmapper { pub(crate) use ip_mapped_addrs::{IpMappedAddr, IpMappedAddresses}; pub(crate) use self::reportgen::IfStateDetails; +#[cfg(not(wasm_browser))] +#[allow(missing_docs)] +#[stack_error(derive, add_meta)] +#[non_exhaustive] +enum QadProbeError { + #[error("Failed to resolve relay address")] + GetRelayAddr { + source: self::reportgen::GetRelayAddrError, + }, + #[error("Missing host in relay URL")] + MissingHost, + #[error("QUIC connection failed")] + Quic { source: iroh_relay::quic::Error }, + #[error("Receiver dropped")] + ReceiverDropped, +} + #[cfg(not(wasm_browser))] use self::reportgen::SocketState; pub use self::{ @@ -749,23 +769,29 @@ async fn run_probe_v4( relay: Arc, quic_client: QuicClient, dns_resolver: DnsResolver, -) -> n0_snafu::Result<(QadProbeReport, QadConn)> { - use n0_snafu::ResultExt; - - let relay_addr_orig = reportgen::get_relay_addr_ipv4(&dns_resolver, &relay).await?; +) -> n0_error::Result<(QadProbeReport, QadConn), QadProbeError> { + let relay_addr_orig = reportgen::get_relay_addr_ipv4(&dns_resolver, &relay) + .await + .map_err(|source| e!(QadProbeError::GetRelayAddr { source }))?; let relay_addr = reportgen::maybe_to_mapped_addr(ip_mapped_addrs.as_ref(), relay_addr_orig.into()); debug!(?relay_addr_orig, ?relay_addr, "relay addr v4"); - let host = relay.url.host_str().context("missing host url")?; - let conn = quic_client.create_conn(relay_addr, host).await?; + let host = relay + .url + .host_str() + .ok_or_else(|| e!(QadProbeError::MissingHost))?; + let conn = quic_client + .create_conn(relay_addr, host) + .await + .map_err(|source| e!(QadProbeError::Quic { source }))?; let mut receiver = conn.observed_external_addr(); // wait for an addr let addr = receiver .wait_for(|addr| addr.is_some()) .await - .context("receiver dropped")? + .map_err(|_| e!(QadProbeError::ReceiverDropped))? 
.expect("known"); let report = QadProbeReport { relay: relay.url.clone(), @@ -817,22 +843,29 @@ async fn run_probe_v6( relay: Arc, quic_client: QuicClient, dns_resolver: DnsResolver, -) -> n0_snafu::Result<(QadProbeReport, QadConn)> { - use n0_snafu::ResultExt; - let relay_addr_orig = reportgen::get_relay_addr_ipv6(&dns_resolver, &relay).await?; +) -> n0_error::Result<(QadProbeReport, QadConn), QadProbeError> { + let relay_addr_orig = reportgen::get_relay_addr_ipv6(&dns_resolver, &relay) + .await + .map_err(|source| e!(QadProbeError::GetRelayAddr { source }))?; let relay_addr = reportgen::maybe_to_mapped_addr(ip_mapped_addrs.as_ref(), relay_addr_orig.into()); debug!(?relay_addr_orig, ?relay_addr, "relay addr v6"); - let host = relay.url.host_str().context("missing host url")?; - let conn = quic_client.create_conn(relay_addr, host).await?; + let host = relay + .url + .host_str() + .ok_or_else(|| e!(QadProbeError::MissingHost))?; + let conn = quic_client + .create_conn(relay_addr, host) + .await + .map_err(|source| e!(QadProbeError::Quic { source }))?; let mut receiver = conn.observed_external_addr(); // wait for an addr let addr = receiver .wait_for(|addr| addr.is_some()) .await - .context("receiver dropped")? + .map_err(|_| e!(QadProbeError::ReceiverDropped))? .expect("known"); let report = QadProbeReport { relay: relay.url.clone(), @@ -921,7 +954,7 @@ mod tests { use iroh_base::RelayUrl; use iroh_relay::dns::DnsResolver; - use n0_snafu::{Result, ResultExt}; + use n0_error::{Result, StdResultExt}; use tokio_util::sync::CancellationToken; use tracing_test::traced_test; @@ -933,7 +966,8 @@ mod tests { async fn test_basic() -> Result<()> { let (server, relay) = test_utils::relay().await; let client_config = iroh_relay::client::make_dangerous_client_config(); - let ep = quinn::Endpoint::client(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 0)).e()?; + let ep = + quinn::Endpoint::client(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 0)).anyerr()?; let quic_addr_disc = QuicConfig { ep: ep.clone(), client_config, diff --git a/iroh/src/net_report/ip_mapped_addrs.rs b/iroh/src/net_report/ip_mapped_addrs.rs index 2a8927edd92..90f1efbc964 100644 --- a/iroh/src/net_report/ip_mapped_addrs.rs +++ b/iroh/src/net_report/ip_mapped_addrs.rs @@ -7,11 +7,11 @@ use std::{ }, }; -use snafu::Snafu; +use n0_error::{e, stack_error}; /// Can occur when converting a [`SocketAddr`] to an [`IpMappedAddr`] -#[derive(Debug, Snafu)] -#[snafu(display("Failed to convert"))] +#[stack_error(derive, add_meta)] +#[error("Failed to convert")] pub struct IpMappedAddrError; /// A map fake Ipv6 address with an actual IP address. 
@@ -73,7 +73,7 @@ impl TryFrom for IpMappedAddr { { return Ok(Self(value)); } - Err(IpMappedAddrError) + Err(e!(IpMappedAddrError)) } } diff --git a/iroh/src/net_report/probes.rs b/iroh/src/net_report/probes.rs index 4f4e153b941..92884ca3eca 100644 --- a/iroh/src/net_report/probes.rs +++ b/iroh/src/net_report/probes.rs @@ -8,7 +8,6 @@ use std::{collections::BTreeSet, fmt, sync::Arc}; use iroh_relay::{RelayConfig, RelayMap}; use n0_future::time::Duration; -use snafu::Snafu; use crate::net_report::Report; @@ -52,10 +51,6 @@ pub(super) struct ProbeSet { probes: Vec<(Duration, Arc)>, } -#[derive(Debug, Snafu)] -#[snafu(display("Mismatching probe"))] -struct PushError; - impl ProbeSet { fn new(proto: Probe) -> Self { Self { diff --git a/iroh/src/net_report/reportgen.rs b/iroh/src/net_report/reportgen.rs index 10ec6d19ea5..8b9fe40642f 100644 --- a/iroh/src/net_report/reportgen.rs +++ b/iroh/src/net_report/reportgen.rs @@ -33,6 +33,7 @@ use iroh_relay::{ dns::{DnsError, DnsResolver, StaggeredError}, quic::QuicClient, }; +use n0_error::{e, stack_error}; #[cfg(wasm_browser)] use n0_future::future::Pending; use n0_future::{ @@ -41,7 +42,6 @@ use n0_future::{ time::{self, Duration, Instant}, }; use rand::seq::IteratorRandom; -use snafu::{IntoError, OptionExt, ResultExt, Snafu}; use tokio::sync::mpsc; use tokio_util::sync::CancellationToken; use tracing::{Instrument, debug, error, trace, warn, warn_span}; @@ -176,17 +176,16 @@ struct Actor { } #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] #[non_exhaustive] -#[snafu(module)] pub(super) enum ProbesError { - #[snafu(display("Probe failed"))] + #[error("Probe failed")] ProbeFailure { source: ProbeError }, - #[snafu(display("All probes failed"))] + #[error("All probes failed")] AllProbesFailed, - #[snafu(display("Probe cancelled"))] + #[error("Probe cancelled")] Cancelled, - #[snafu(display("Probe timed out"))] + #[error("Probe timed out")] Timeout, } @@ -300,8 +299,8 @@ impl Actor { Some(Ok(Ok(found))) => Some(found), Some(Ok(Err(err))) => { match err { - CaptivePortalError::CreateReqwestClient { source } - | CaptivePortalError::HttpRequest { source } + CaptivePortalError::CreateReqwestClient { source, .. } + | CaptivePortalError::HttpRequest { source, .. } if source.is_connect() => { debug!("check_captive_portal failed: {source:#}"); @@ -385,12 +384,10 @@ impl Actor { Some(Ok(Ok(report))) => Ok(report), Some(Ok(Err(err))) => { warn!("probe failed: {:#}", err); - Err(probes_error::ProbeFailureSnafu {}.into_error(err)) + Err(e!(ProbesError::ProbeFailure, err)) } - Some(Err(time::Elapsed { .. })) => { - Err(probes_error::TimeoutSnafu.build()) - } - None => Err(probes_error::CancelledSnafu.build()), + Some(Err(time::Elapsed { .. 
})) => Err(e!(ProbesError::Timeout)), + None => Err(e!(ProbesError::Cancelled)), }; ProbeFinished::Regular(res) } @@ -450,26 +447,24 @@ pub(super) struct HttpsProbeReport { } #[allow(missing_docs)] -#[derive(Debug, Snafu)] -#[snafu(module)] +#[stack_error(derive, add_meta)] #[non_exhaustive] pub(super) enum ProbeError { - #[snafu(display("Client is gone"))] + #[error("Client is gone")] ClientGone, - #[snafu(display("Probe is no longer useful"))] + #[error("Probe is no longer useful")] NotUseful, - #[snafu(display("Failed to run HTTPS probe"))] + #[error("Failed to run HTTPS probe")] Https { source: MeasureHttpsLatencyError }, } #[allow(missing_docs)] -#[derive(Debug, Snafu)] -#[snafu(module)] +#[stack_error(derive, add_meta)] #[non_exhaustive] pub(super) enum QuicError { - #[snafu(display("No relay available"))] + #[error("No relay available")] NoRelay, - #[snafu(display("URL must have 'host' to use QUIC address discovery probes"))] + #[error("URL must have 'host' to use QUIC address discovery probes")] InvalidUrl, } @@ -514,7 +509,7 @@ impl Probe { .await { Ok(report) => Ok(ProbeReport::Https(report)), - Err(err) => Err(probe_error::HttpsSnafu.into_error(err)), + Err(err) => Err(e!(ProbeError::Https, err)), } } #[cfg(not(wasm_browser))] @@ -535,16 +530,24 @@ pub(super) fn maybe_to_mapped_addr( } #[cfg(not(wasm_browser))] -#[derive(Debug, Snafu)] -#[snafu(module)] +#[stack_error(derive, add_meta)] #[non_exhaustive] enum CaptivePortalError { - #[snafu(transparent)] - DnsLookup { source: StaggeredError }, - #[snafu(display("Creating HTTP client failed"))] - CreateReqwestClient { source: reqwest::Error }, - #[snafu(display("HTTP request failed"))] - HttpRequest { source: reqwest::Error }, + #[error(transparent)] + DnsLookup { + #[error(from)] + source: StaggeredError, + }, + #[error("Creating HTTP client failed")] + CreateReqwestClient { + #[error(std_err)] + source: reqwest::Error, + }, + #[error("HTTP request failed")] + HttpRequest { + #[error(std_err)] + source: reqwest::Error, + }, } /// Reports whether or not we think the system is behind a @@ -597,7 +600,7 @@ async fn check_captive_portal( } let client = builder .build() - .context(captive_portal_error::CreateReqwestClientSnafu)?; + .map_err(|err| e!(CaptivePortalError::CreateReqwestClient, err))?; // Note: the set of valid characters in a challenge and the total // length is limited; see is_challenge_char in bin/iroh-relay for more @@ -611,7 +614,7 @@ async fn check_captive_portal( .header("X-Iroh-Challenge", &challenge) .send() .await - .context(captive_portal_error::HttpRequestSnafu)?; + .map_err(|err| e!(CaptivePortalError::HttpRequest, err))?; let expected_response = format!("response {challenge}"); let is_valid_response = res @@ -646,21 +649,20 @@ fn get_quic_port(relay: &RelayConfig) -> Option { } #[cfg(not(wasm_browser))] -#[derive(Debug, Snafu)] -#[snafu(module)] +#[stack_error(derive, add_meta)] #[non_exhaustive] pub enum GetRelayAddrError { - #[snafu(display("No valid hostname in the relay URL"))] + #[error("No valid hostname in the relay URL")] InvalidHostname, - #[snafu(display("No suitable relay address found"))] + #[error("No suitable relay address found")] NoAddrFound, - #[snafu(display("DNS lookup failed"))] + #[error("DNS lookup failed")] DnsLookup { source: StaggeredError }, - #[snafu(display("Relay is not suitable"))] + #[error("Relay is not suitable")] UnsupportedRelay, - #[snafu(display("HTTPS probes are not implemented"))] + #[error("HTTPS probes are not implemented")] UnsupportedHttps, - #[snafu(display("No port 
available for this protocol"))] + #[error("No port available for this protocol")] MissingPort, } @@ -670,7 +672,7 @@ pub(super) async fn get_relay_addr_ipv4( dns_resolver: &DnsResolver, relay: &RelayConfig, ) -> Result { - let port = get_quic_port(relay).context(get_relay_addr_error::MissingPortSnafu)?; + let port = get_quic_port(relay).ok_or_else(|| e!(GetRelayAddrError::MissingPort))?; relay_lookup_ipv4_staggered(dns_resolver, relay, port).await } @@ -679,7 +681,7 @@ pub(super) async fn get_relay_addr_ipv6( dns_resolver: &DnsResolver, relay: &RelayConfig, ) -> Result { - let port = get_quic_port(relay).context(get_relay_addr_error::MissingPortSnafu)?; + let port = get_quic_port(relay).ok_or_else(|| e!(GetRelayAddrError::MissingPort))?; relay_lookup_ipv6_staggered(dns_resolver, relay, port).await } @@ -706,13 +708,13 @@ async fn relay_lookup_ipv4_staggered( IpAddr::V4(ip) => SocketAddrV4::new(ip, port), IpAddr::V6(_) => unreachable!("bad DNS lookup: {:?}", addr), }) - .ok_or(get_relay_addr_error::NoAddrFoundSnafu.build()), - Err(err) => Err(get_relay_addr_error::DnsLookupSnafu.into_error(err)), + .ok_or_else(|| e!(GetRelayAddrError::NoAddrFound)), + Err(err) => Err(e!(GetRelayAddrError::DnsLookup, err)), } } Some(url::Host::Ipv4(addr)) => Ok(SocketAddrV4::new(addr, port)), - Some(url::Host::Ipv6(_addr)) => Err(get_relay_addr_error::NoAddrFoundSnafu.build()), - None => Err(get_relay_addr_error::InvalidHostnameSnafu.build()), + Some(url::Host::Ipv6(_addr)) => Err(e!(GetRelayAddrError::NoAddrFound)), + None => Err(e!(GetRelayAddrError::InvalidHostname)), } } @@ -738,30 +740,41 @@ async fn relay_lookup_ipv6_staggered( IpAddr::V4(_) => unreachable!("bad DNS lookup: {:?}", addr), IpAddr::V6(ip) => SocketAddrV6::new(ip, port, 0, 0), }) - .ok_or(get_relay_addr_error::NoAddrFoundSnafu.build()), - Err(err) => Err(get_relay_addr_error::DnsLookupSnafu.into_error(err)), + .ok_or_else(|| e!(GetRelayAddrError::NoAddrFound)), + Err(err) => Err(e!(GetRelayAddrError::DnsLookup, err)), } } - Some(url::Host::Ipv4(_addr)) => Err(get_relay_addr_error::NoAddrFoundSnafu.build()), + Some(url::Host::Ipv4(_addr)) => Err(e!(GetRelayAddrError::NoAddrFound)), Some(url::Host::Ipv6(addr)) => Ok(SocketAddrV6::new(addr, port, 0, 0)), - None => Err(get_relay_addr_error::InvalidHostnameSnafu.build()), + None => Err(e!(GetRelayAddrError::InvalidHostname)), } } -#[derive(Debug, Snafu)] -#[snafu(module)] +#[stack_error(derive, add_meta)] #[non_exhaustive] pub enum MeasureHttpsLatencyError { - #[snafu(transparent)] - InvalidUrl { source: url::ParseError }, + #[error(transparent)] + InvalidUrl { + #[error(std_err, from)] + source: url::ParseError, + }, #[cfg(not(wasm_browser))] - #[snafu(transparent)] - DnsLookup { source: StaggeredError }, - #[snafu(display("Creating HTTP client failed"))] - CreateReqwestClient { source: reqwest::Error }, - #[snafu(display("HTTP request failed"))] - HttpRequest { source: reqwest::Error }, - #[snafu(display("Error response from server {status}: {:?}", status.canonical_reason()))] + #[error(transparent)] + DnsLookup { + #[error(from)] + source: StaggeredError, + }, + #[error("Creating HTTP client failed")] + CreateReqwestClient { + #[error(std_err)] + source: reqwest::Error, + }, + #[error("HTTP request failed")] + HttpRequest { + #[error(std_err)] + source: reqwest::Error, + }, + #[error("Error response from server {status}: {:?}", status.canonical_reason())] InvalidResponse { status: StatusCode }, } @@ -811,14 +824,14 @@ async fn run_https_probe( let client = builder .build() - 
.context(measure_https_latency_error::CreateReqwestClientSnafu)?; + .map_err(|err| e!(MeasureHttpsLatencyError::CreateReqwestClient, err))?; let start = Instant::now(); let response = client .request(reqwest::Method::GET, url) .send() .await - .context(measure_https_latency_error::HttpRequestSnafu)?; + .map_err(|err| e!(MeasureHttpsLatencyError::HttpRequest, err))?; let latency = start.elapsed(); if response.status().is_success() { // Drain the response body to be nice to the server, up to a limit. @@ -835,10 +848,9 @@ async fn run_https_probe( Ok(HttpsProbeReport { relay, latency }) } else { - Err(measure_https_latency_error::InvalidResponseSnafu { - status: response.status(), - } - .build()) + Err(e!(MeasureHttpsLatencyError::InvalidResponse { + status: response.status() + })) } } @@ -847,7 +859,7 @@ mod tests { use std::net::Ipv4Addr; use iroh_relay::dns::DnsResolver; - use n0_snafu::{Result, ResultExt}; + use n0_error::{Result, StdResultExt}; use tracing_test::traced_test; use super::{super::test_utils, *}; @@ -870,8 +882,9 @@ mod tests { let (server, relay) = test_utils::relay().await; let relay = Arc::new(relay); let client_config = iroh_relay::client::make_dangerous_client_config(); - let ep = quinn::Endpoint::client(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 0)).e()?; - let client_addr = ep.local_addr().e()?; + let ep = + quinn::Endpoint::client(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 0)).anyerr()?; + let client_addr = ep.local_addr().anyerr()?; let quic_client = iroh_relay::quic::QuicClient::new(ep.clone(), client_config); let dns_resolver = DnsResolver::default(); diff --git a/iroh/src/protocol.rs b/iroh/src/protocol.rs index 1567ad5919e..95610dfd66e 100644 --- a/iroh/src/protocol.rs +++ b/iroh/src/protocol.rs @@ -43,11 +43,11 @@ use std::{ }; use iroh_base::EndpointId; +use n0_error::{AnyError, e, stack_error}; use n0_future::{ join_all, task::{self, AbortOnDropHandle, JoinSet}, }; -use snafu::{Backtrace, Snafu}; use tokio_util::sync::CancellationToken; use tracing::{Instrument, error, field::Empty, info_span, trace, warn}; @@ -70,10 +70,10 @@ use crate::{ /// /// ```no_run /// # use std::sync::Arc; -/// # use n0_snafu::ResultExt; +/// # use n0_error::StdResultExt; /// # use iroh::{endpoint::Connecting, protocol::{ProtocolHandler, Router}, Endpoint, EndpointAddr}; /// # -/// # async fn test_compile() -> n0_snafu::Result<()> { +/// # async fn test_compile() -> n0_error::Result<()> { /// let endpoint = Endpoint::bind().await?; /// /// let router = Router::builder(endpoint) @@ -81,8 +81,8 @@ use crate::{ /// .spawn(); /// /// // wait until the user wants to -/// tokio::signal::ctrl_c().await.context("ctrl+c")?; -/// router.shutdown().await.context("shutdown")?; +/// tokio::signal::ctrl_c().await.std_context("ctrl+c")?; +/// router.shutdown().await.std_context("shutdown")?; /// # Ok(()) /// # } /// ``` @@ -102,33 +102,28 @@ pub struct RouterBuilder { } #[allow(missing_docs)] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources, std_sources)] #[non_exhaustive] pub enum AcceptError { - #[snafu(transparent)] + #[error(transparent)] Connection { source: crate::endpoint::ConnectionError, - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, }, - #[snafu(transparent)] + #[error(transparent)] MissingRemoteEndpointId { source: RemoteEndpointIdError }, - #[snafu(display("Not allowed."))] + #[error("Not allowed.")] NotAllowed {}, - - #[snafu(transparent)] - User { - source: Box, - }, + #[error(transparent)] + User { source: AnyError }, } impl 
AcceptError { /// Creates a new user error from an arbitrary error type. + #[track_caller] pub fn from_err(value: T) -> Self { - Self::User { - source: Box::new(value), - } + e!(AcceptError::User { + source: AnyError::from_std(value) + }) } } @@ -568,7 +563,7 @@ impl ProtocolHandler for AccessLimit

{ let is_allowed = (self.limiter)(remote); if !is_allowed { conn.close(0u32.into(), b"not allowed"); - return Err(NotAllowedSnafu.build()); + return Err(e!(AcceptError::NotAllowed)); } self.proto.accept(conn).await?; Ok(()) @@ -583,7 +578,7 @@ impl ProtocolHandler for AccessLimit

{ mod tests { use std::{sync::Mutex, time::Duration}; - use n0_snafu::{Result, ResultExt}; + use n0_error::{Result, StdResultExt}; use quinn::ApplicationClose; use super::*; @@ -597,7 +592,7 @@ mod tests { assert!(!router.is_shutdown()); assert!(!endpoint.is_closed()); - router.shutdown().await.e()?; + router.shutdown().await.anyerr()?; assert!(router.is_shutdown()); assert!(endpoint.is_closed()); @@ -641,11 +636,11 @@ mod tests { println!("connecting"); let conn = e2.connect(addr1, ECHO_ALPN).await?; - let (_send, mut recv) = conn.open_bi().await.e()?; + let (_send, mut recv) = conn.open_bi().await.anyerr()?; let response = recv.read_to_end(1000).await.unwrap_err(); assert!(format!("{response:#?}").contains("not allowed")); - r1.shutdown().await.e()?; + r1.shutdown().await.anyerr()?; e2.close().await; Ok(()) @@ -689,7 +684,7 @@ mod tests { let conn = endpoint2.connect(addr, TEST_ALPN).await?; eprintln!("starting shutdown"); - router.shutdown().await.e()?; + router.shutdown().await.anyerr()?; eprintln!("waiting for closed conn"); let reason = conn.closed().await; diff --git a/iroh/src/test_utils.rs b/iroh/src/test_utils.rs index 3900703d920..99289aea032 100644 --- a/iroh/src/test_utils.rs +++ b/iroh/src/test_utils.rs @@ -519,7 +519,7 @@ pub(crate) mod pkarr_dns_state { #[cfg(test)] mod tests { use iroh_base::EndpointId; - use n0_snafu::Result; + use n0_error::Result; #[test] fn test_endpoint_id_from_domain_name() -> Result { diff --git a/iroh/src/tls/resolver.rs b/iroh/src/tls/resolver.rs index 1acb5f98a42..5f70937e8d5 100644 --- a/iroh/src/tls/resolver.rs +++ b/iroh/src/tls/resolver.rs @@ -2,8 +2,7 @@ use std::sync::Arc; use ed25519_dalek::pkcs8::{EncodePrivateKey, spki::der::pem::LineEnding}; use iroh_base::SecretKey; -use nested_enum_utils::common_fields; -use snafu::Snafu; +use n0_error::stack_error; use webpki_types::{CertificateDer, PrivatePkcs8KeyDer, pem::PemObject}; #[derive(Debug)] @@ -12,16 +11,11 @@ pub(super) struct AlwaysResolvesCert { } /// Error for generating TLS configs. 
-#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: n0_snafu::SpanTrace, -})] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources, std_sources)] #[non_exhaustive] pub(super) enum CreateConfigError { /// Rustls configuration error - #[snafu(display("rustls error"), context(false))] + #[error("rustls error")] Rustls { source: rustls::Error }, } diff --git a/iroh/tests/integration.rs b/iroh/tests/integration.rs index a0abaa3d129..0db3df01844 100644 --- a/iroh/tests/integration.rs +++ b/iroh/tests/integration.rs @@ -13,11 +13,11 @@ use iroh::{ Endpoint, RelayMode, discovery::{Discovery, pkarr::PkarrResolver}, }; +use n0_error::{Result, StdResultExt}; use n0_future::{ StreamExt, task, time::{self, Duration}, }; -use n0_snafu::{Result, ResultExt}; #[cfg(not(wasm_browser))] use tokio::test; use tracing::{Instrument, info_span}; @@ -52,7 +52,7 @@ async fn simple_endpoint_id_based_connection_transfer() -> Result { tracing::info!("waiting for server to go online"); time::timeout(Duration::from_secs(12), server.online()) .await - .context("server endpoint took too long to get online")?; + .std_context("server endpoint took too long to get online")?; // Make the server respond to requests with an echo task::spawn({ @@ -61,24 +61,24 @@ async fn simple_endpoint_id_based_connection_transfer() -> Result { async move { while let Some(incoming) = server.accept().await { tracing::info!("accepting connection"); - let conn = incoming.await.e()?; + let conn = incoming.await.anyerr()?; let endpoint_id = conn.remote_id()?; tracing::info!(endpoint_id = %endpoint_id.fmt_short(), "Accepted connection"); - let (mut send, mut recv) = conn.accept_bi().await.e()?; + let (mut send, mut recv) = conn.accept_bi().await.anyerr()?; let mut bytes_sent = 0; - while let Some(chunk) = recv.read_chunk(10_000, true).await.e()? { + while let Some(chunk) = recv.read_chunk(10_000, true).await.anyerr()? { bytes_sent += chunk.bytes.len(); - send.write_chunk(chunk.bytes).await.e()?; + send.write_chunk(chunk.bytes).await.anyerr()?; } - send.finish().e()?; + send.finish().anyerr()?; tracing::info!("Copied over {bytes_sent} byte(s)"); let code = conn.closed().await; tracing::info!("Closed with code: {code:?}"); } - Ok::<_, n0_snafu::Error>(()) + n0_error::Ok(()) } .instrument(info_span!("server")) }); @@ -111,18 +111,18 @@ async fn simple_endpoint_id_based_connection_transfer() -> Result { } }) .await - .e()?; + .anyerr()?; tracing::info!(to = %server.id().fmt_short(), "Opening a connection"); let conn = client.connect(server.id(), ECHO_ALPN).await?; tracing::info!("Connection opened"); - let (mut send, mut recv) = conn.open_bi().await.e()?; - send.write_all(b"Hello, World!").await.e()?; - send.finish().e()?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; + send.write_all(b"Hello, World!").await.anyerr()?; + send.finish().anyerr()?; tracing::info!("Sent request"); - let response = recv.read_to_end(10_000).await.e()?; + let response = recv.read_to_end(10_000).await.anyerr()?; tracing::info!(len = response.len(), "Received response"); assert_eq!(&response, b"Hello, World!");