From c68125d48d8e546bf2dce27ca311da6896b47b13 Mon Sep 17 00:00:00 2001 From: eltitanb Date: Wed, 2 Apr 2025 15:03:57 +0100 Subject: [PATCH 01/52] bump version --- crates/signer/src/proto/v1.rs | 205 +++++++++++++++++++++++----------- 1 file changed, 141 insertions(+), 64 deletions(-) diff --git a/crates/signer/src/proto/v1.rs b/crates/signer/src/proto/v1.rs index ba8012c3..36984aa0 100644 --- a/crates/signer/src/proto/v1.rs +++ b/crates/signer/src/proto/v1.rs @@ -24,8 +24,7 @@ impl ResponseState { /// String value of the enum field names used in the ProtoBuf definition. /// /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic - /// use. + /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { Self::Unknown => "UNKNOWN", @@ -90,9 +89,10 @@ pub mod lister_client { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value + clippy::let_unit_value, )] - use tonic::codegen::{http::Uri, *}; + use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct ListerClient { inner: tonic::client::Grpc, @@ -136,15 +136,16 @@ pub mod lister_client { >::ResponseBody, >, >, - >>::Error: - Into + std::marker::Send + std::marker::Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { ListerClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// - /// This requires the server to support it otherwise it might respond - /// with an error. + /// This requires the server to support it otherwise it might respond with an + /// error. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); @@ -175,11 +176,18 @@ pub mod lister_client { pub async fn list_accounts( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.Lister/ListAccounts"); let mut req = request.into_request(); @@ -239,9 +247,10 @@ pub mod account_manager_client { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value + clippy::let_unit_value, )] - use tonic::codegen::{http::Uri, *}; + use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct AccountManagerClient { inner: tonic::client::Grpc, @@ -285,15 +294,16 @@ pub mod account_manager_client { >::ResponseBody, >, >, - >>::Error: - Into + std::marker::Send + std::marker::Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { AccountManagerClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// - /// This requires the server to support it otherwise it might respond - /// with an error. + /// This requires the server to support it otherwise it might respond with an + /// error. 
#[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); @@ -324,11 +334,18 @@ pub mod account_manager_client { pub async fn unlock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.AccountManager/Unlock"); let mut req = request.into_request(); @@ -338,11 +355,18 @@ pub mod account_manager_client { pub async fn lock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.AccountManager/Lock"); let mut req = request.into_request(); @@ -352,14 +376,25 @@ pub mod account_manager_client { pub async fn generate( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/v1.AccountManager/Generate"); + let path = http::uri::PathAndQuery::from_static( + "/v1.AccountManager/Generate", + ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("v1.AccountManager", "Generate")); + req.extensions_mut() + .insert(GrpcMethod::new("v1.AccountManager", "Generate")); self.inner.unary(req, path, codec).await } } @@ -486,9 +521,10 @@ pub mod signer_client { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value + clippy::let_unit_value, )] - use tonic::codegen::{http::Uri, *}; + use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct SignerClient { inner: tonic::client::Grpc, @@ -532,15 +568,16 @@ pub mod signer_client { >::ResponseBody, >, >, - >>::Error: - Into + std::marker::Send + std::marker::Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { SignerClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// - /// This requires the server to support it otherwise it might respond - /// with an error. + /// This requires the server to support it otherwise it might respond with an + /// error. 
#[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); @@ -572,9 +609,14 @@ pub mod signer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.Signer/Sign"); let mut req = request.into_request(); @@ -584,10 +626,18 @@ pub mod signer_client { pub async fn multisign( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.Signer/Multisign"); let mut req = request.into_request(); @@ -598,39 +648,66 @@ pub mod signer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/v1.Signer/SignBeaconAttestation"); + let path = http::uri::PathAndQuery::from_static( + "/v1.Signer/SignBeaconAttestation", + ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("v1.Signer", "SignBeaconAttestation")); + req.extensions_mut() + .insert(GrpcMethod::new("v1.Signer", "SignBeaconAttestation")); self.inner.unary(req, path, codec).await } pub async fn sign_beacon_attestations( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/v1.Signer/SignBeaconAttestations"); + let path = http::uri::PathAndQuery::from_static( + "/v1.Signer/SignBeaconAttestations", + ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("v1.Signer", "SignBeaconAttestations")); + req.extensions_mut() + .insert(GrpcMethod::new("v1.Signer", "SignBeaconAttestations")); self.inner.unary(req, path, codec).await } pub async fn sign_beacon_proposal( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = 
tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/v1.Signer/SignBeaconProposal"); + let path = http::uri::PathAndQuery::from_static( + "/v1.Signer/SignBeaconProposal", + ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("v1.Signer", "SignBeaconProposal")); + req.extensions_mut() + .insert(GrpcMethod::new("v1.Signer", "SignBeaconProposal")); self.inner.unary(req, path, codec).await } } From d9979a239eaf6fc0365a30c39cf81264d34d9edd Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 5 May 2025 17:13:43 -0400 Subject: [PATCH 02/52] Successful cross-compilation, but runtime has memory allocation issues --- provisioning/signer.Dockerfile | 70 +++++++++++++++++++++++++++++----- 1 file changed, 60 insertions(+), 10 deletions(-) diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 85c2be43..bc258b47 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,22 +1,72 @@ -FROM lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +# This will be the main build image +FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +ARG TARGETOS TARGETARCH BUILDPLATFORM WORKDIR /app -FROM chef AS planner +# Planner stage +FROM --platform=${BUILDPLATFORM} chef AS planner +ARG TARGETOS TARGETARCH BUILDPLATFORM COPY . . RUN cargo chef prepare --recipe-path recipe.json -FROM chef AS builder +# Builder stage +FROM --platform=${BUILDPLATFORM} chef AS builder +ARG TARGETOS TARGETARCH BUILDPLATFORM COPY --from=planner /app/recipe.json recipe.json - -RUN cargo chef cook --release --recipe-path recipe.json - -RUN apt-get update && apt-get install -y protobuf-compiler - COPY . . -RUN cargo build --release --bin commit-boost-signer +# Get the latest Protoc since the one in the Debian repo is incredibly old +RUN apt update && apt install -y unzip curl ca-certificates && \ + PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ + if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ + PROTOC_ARCH=x86_64; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ + PROTOC_ARCH=aarch_64; \ + else \ + echo "${BUILDPLATFORM} is not supported."; \ + exit 1; \ + fi && \ + curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ + unzip -q protoc.zip bin/protoc -d /usr && \ + unzip -q protoc.zip "include/google/*" -d /usr && \ + chmod a+x /usr/bin/protoc && \ + rm -rf protoc.zip + +# Build the application +RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ + # We're on x64, cross-compiling for arm64 - get OpenSSL and zlib for arm64, and set up the GCC vars + dpkg --add-architecture arm64 && \ + apt update && \ + apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ + rustup target add aarch64-unknown-linux-gnu && \ + TARGET="aarch64-unknown-linux-gnu" && \ + TARGET_FLAG="--target=${TARGET}" && \ + export PKG_CONFIG_ALLOW_CROSS="true" && \ + export PKG_CONFIG_PATH="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ + export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-ld" && \ + export RUSTFLAGS="-L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))"; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ + # We're on arm64, cross-compiling for x64 - get OpenSSL and zlib for x64, and set up the GCC vars + dpkg 
--add-architecture amd64 && \ + apt update && \ + apt install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 && \ + rustup target add x86_64-unknown-linux-gnu && \ + TARGET="x86_64-unknown-linux-gnu" && \ + TARGET_FLAG="--target=${TARGET}" && \ + export PKG_CONFIG_ALLOW_CROSS="true" && \ + export PKG_CONFIG_PATH="/usr/lib/x86_64-linux-gnu/pkgconfig"; \ + export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-ld"; \ + export RUSTFLAGS="-L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ + fi && \ + # Build the signer - general setup that works with or without cross-compilation + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ + cargo build ${TARGET_FLAG} --release --bin commit-boost-signer && \ + if [ ! -z "$TARGET" ]; then \ + # If we're cross-compiling, we need to move the binary out of the target dir + mv target/${TARGET}/release/commit-boost-signer target/release/commit-boost-signer; \ + fi -FROM debian:bookworm-20240904-slim AS runtime +FROM debian:bookworm-slim AS runtime WORKDIR /app RUN apt-get update && apt-get install -y \ From 97ef653d602dbf9397de54abdc48ba21f063eb9e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 04:09:49 -0400 Subject: [PATCH 03/52] Working with OpenSSL static-linked --- Cargo.lock | 12 ++++++++++++ Cargo.toml | 3 +++ crates/common/Cargo.toml | 4 ++++ crates/common/build.rs | 8 ++++++++ provisioning/signer.Dockerfile | 20 +++++++++++--------- 5 files changed, 38 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ebc811a..436d3b65 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1494,6 +1494,8 @@ dependencies = [ "ethereum_ssz_derive", "eyre", "jsonwebtoken", + "k256", + "openssl", "pbkdf2 0.12.2", "rand 0.9.0", "reqwest", @@ -3550,6 +3552,15 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-src" +version = "300.5.0+3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8ce546f549326b0e6052b649198487d91320875da901e7bd11a06d1ee3f9c2f" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" version = "0.9.106" @@ -3558,6 +3569,7 @@ checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd" dependencies = [ "cc", "libc", + "openssl-src", "pkg-config", "vcpkg", ] diff --git a/Cargo.toml b/Cargo.toml index aef26a94..14cddf82 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,9 @@ edition = "2021" rust-version = "1.83" version = "0.7.0-rc.2" +[workspace.features] +openssl-vendored = ["crates/common/openssl-vendored"] + [workspace.dependencies] aes = "0.8" alloy = { version = "0.12", features = [ diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index df78b046..15c0b8d1 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -41,3 +41,7 @@ tree_hash_derive.workspace = true unicode-normalization.workspace = true url.workspace = true jsonwebtoken.workspace = true +openssl = { version = "0.10", optional = true, features = ["vendored"] } + +[features] +openssl-vendored = ["openssl/vendored"] diff --git a/crates/common/build.rs b/crates/common/build.rs index 9bd10ecb..c24a54cb 100644 --- a/crates/common/build.rs +++ b/crates/common/build.rs @@ -1,6 +1,14 @@ use std::process::Command; fn main() { + let target = std::env::var("TARGET").unwrap(); + let host = std::env::var("HOST").unwrap(); + + if target != host { + 
println!("cargo:warning=Skipping build script because TARGET != HOST"); + return; + } + let output = Command::new("git").args(["rev-parse", "HEAD"]).output().unwrap(); let git_hash = String::from_utf8(output.stdout).unwrap(); println!("cargo:rustc-env=GIT_HASH={git_hash}"); diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index bc258b47..523a2ff4 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -35,16 +35,17 @@ RUN apt update && apt install -y unzip curl ca-certificates && \ # Build the application RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # We're on x64, cross-compiling for arm64 - get OpenSSL and zlib for arm64, and set up the GCC vars - dpkg --add-architecture arm64 && \ + rustup target add aarch64-unknown-linux-gnu && \ + #dpkg --add-architecture arm64 && \ apt update && \ apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ - rustup target add aarch64-unknown-linux-gnu && \ TARGET="aarch64-unknown-linux-gnu" && \ TARGET_FLAG="--target=${TARGET}" && \ - export PKG_CONFIG_ALLOW_CROSS="true" && \ - export PKG_CONFIG_PATH="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ - export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-ld" && \ - export RUSTFLAGS="-L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))"; \ + # export PKG_CONFIG_ALLOW_CROSS="true" && \ + # export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ + export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-gcc" && \ + export RUSTFLAGS="-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))" && \ + FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ # We're on arm64, cross-compiling for x64 - get OpenSSL and zlib for x64, and set up the GCC vars dpkg --add-architecture amd64 && \ @@ -55,12 +56,13 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ TARGET_FLAG="--target=${TARGET}" && \ export PKG_CONFIG_ALLOW_CROSS="true" && \ export PKG_CONFIG_PATH="/usr/lib/x86_64-linux-gnu/pkgconfig"; \ - export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-ld"; \ + export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-gcc"; \ export RUSTFLAGS="-L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ fi && \ # Build the signer - general setup that works with or without cross-compilation - cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ - cargo build ${TARGET_FLAG} --release --bin commit-boost-signer && \ + # cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo build ${TARGET_FLAG} --release --bin commit-boost-signer ${FEATURE_OPENSSL_VENDORED} && \ if [ ! -z "$TARGET" ]; then \ # If we're cross-compiling, we need to move the binary out of the target dir mv target/${TARGET}/release/commit-boost-signer target/release/commit-boost-signer; \ From 91eefe2de57a28c6ddbda38666046cbc711f93d6 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 04:43:02 -0400 Subject: [PATCH 04/52] Got dynamic linking working, added a feature flag to toggle dynamic vs. 
static --- provisioning/signer.Dockerfile | 49 ++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 523a2ff4..3c29075d 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,17 +1,17 @@ # This will be the main build image FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef -ARG TARGETOS TARGETARCH BUILDPLATFORM +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED WORKDIR /app # Planner stage FROM --platform=${BUILDPLATFORM} chef AS planner -ARG TARGETOS TARGETARCH BUILDPLATFORM +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED COPY . . RUN cargo chef prepare --recipe-path recipe.json # Builder stage FROM --platform=${BUILDPLATFORM} chef AS builder -ARG TARGETOS TARGETARCH BUILDPLATFORM +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED COPY --from=planner /app/recipe.json recipe.json COPY . . @@ -34,30 +34,45 @@ RUN apt update && apt install -y unzip curl ca-certificates && \ # Build the application RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ - # We're on x64, cross-compiling for arm64 - get OpenSSL and zlib for arm64, and set up the GCC vars + # We're on x64, cross-compiling for arm64 rustup target add aarch64-unknown-linux-gnu && \ - #dpkg --add-architecture arm64 && \ apt update && \ - apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ + apt install -y gcc-aarch64-linux-gnu && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture arm64 && \ + apt update && \ + apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ + export PKG_CONFIG_ALLOW_CROSS="true" && \ + export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ + export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu && \ + export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu && \ + FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ + fi && \ TARGET="aarch64-unknown-linux-gnu" && \ TARGET_FLAG="--target=${TARGET}" && \ - # export PKG_CONFIG_ALLOW_CROSS="true" && \ - # export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-gcc" && \ - export RUSTFLAGS="-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))" && \ - FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ + export RUSTFLAGS="-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))"; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ - # We're on arm64, cross-compiling for x64 - get OpenSSL and zlib for x64, and set up the GCC vars - dpkg --add-architecture amd64 && \ - apt update && \ - apt install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 && \ + # We're on arm64, cross-compiling for x64 rustup target add x86_64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-x86-64-linux-gnu && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture amd64 && \ + apt update && \ + apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ + export PKG_CONFIG_ALLOW_CROSS="true" && \ + export PKG_CONFIG_LIBDIR="/usr/lib/x86_64-linux-gnu/pkgconfig" && \ + export 
OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu && \ + export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu && \ + FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ + fi && \ TARGET="x86_64-unknown-linux-gnu" && \ TARGET_FLAG="--target=${TARGET}" && \ - export PKG_CONFIG_ALLOW_CROSS="true" && \ - export PKG_CONFIG_PATH="/usr/lib/x86_64-linux-gnu/pkgconfig"; \ export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-gcc"; \ - export RUSTFLAGS="-L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ + export RUSTFLAGS="-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ fi && \ # Build the signer - general setup that works with or without cross-compilation # cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ From de09415b8fd994f1b74ed772787aabfd4ac52234 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 13:13:55 -0400 Subject: [PATCH 05/52] Fixed the vendored build arg --- provisioning/signer.Dockerfile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 3c29075d..984ba9b4 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -46,7 +46,8 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ export PKG_CONFIG_ALLOW_CROSS="true" && \ export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu && \ - export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu && \ + export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu; \ + else \ FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ fi && \ TARGET="aarch64-unknown-linux-gnu" && \ @@ -66,7 +67,8 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ export PKG_CONFIG_ALLOW_CROSS="true" && \ export PKG_CONFIG_LIBDIR="/usr/lib/x86_64-linux-gnu/pkgconfig" && \ export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu && \ - export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu && \ + export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu; \ + else \ FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ fi && \ TARGET="x86_64-unknown-linux-gnu" && \ From 3aee63d1a00c70fce4e86a1a1600f134a2437b41 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 15:35:58 -0400 Subject: [PATCH 06/52] Reintroduced the cargo chef setup --- provisioning/signer.Dockerfile | 104 ++++++++++++++++++++------------- 1 file changed, 63 insertions(+), 41 deletions(-) diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 984ba9b4..6de707f0 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -12,72 +12,94 @@ RUN cargo chef prepare --recipe-path recipe.json # Builder stage FROM --platform=${BUILDPLATFORM} chef AS builder ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED +ENV BUILD_VAR_SCRIPT=/tmp/env.sh COPY --from=planner /app/recipe.json recipe.json -COPY . . 
- -# Get the latest Protoc since the one in the Debian repo is incredibly old -RUN apt update && apt install -y unzip curl ca-certificates && \ - PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ - if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ - PROTOC_ARCH=x86_64; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ - PROTOC_ARCH=aarch_64; \ - else \ - echo "${BUILDPLATFORM} is not supported."; \ - exit 1; \ - fi && \ - curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ - unzip -q protoc.zip bin/protoc -d /usr && \ - unzip -q protoc.zip "include/google/*" -d /usr && \ - chmod a+x /usr/bin/protoc && \ - rm -rf protoc.zip -# Build the application +# Set up the build environment for cross-compilation if needed RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # We're on x64, cross-compiling for arm64 rustup target add aarch64-unknown-linux-gnu && \ apt update && \ apt install -y gcc-aarch64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ if [ "$OPENSSL_VENDORED" != "true" ]; then \ # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation dpkg --add-architecture arm64 && \ apt update && \ apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ - export PKG_CONFIG_ALLOW_CROSS="true" && \ - export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ - export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu && \ - export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu; \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ else \ - FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ - fi && \ - TARGET="aarch64-unknown-linux-gnu" && \ - TARGET_FLAG="--target=${TARGET}" && \ - export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-gcc" && \ - export RUSTFLAGS="-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))"; \ + echo "export FEATURE_OPENSSL_VENDORED='--features openssl-vendored'" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ # We're on arm64, cross-compiling for x64 rustup target add x86_64-unknown-linux-gnu && \ apt update && \ apt install -y gcc-x86-64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname 
$(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ if [ "$OPENSSL_VENDORED" != "true" ]; then \ # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation dpkg --add-architecture amd64 && \ apt update && \ apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ - export PKG_CONFIG_ALLOW_CROSS="true" && \ - export PKG_CONFIG_LIBDIR="/usr/lib/x86_64-linux-gnu/pkgconfig" && \ - export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu && \ - export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu; \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ else \ - FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ - fi && \ - TARGET="x86_64-unknown-linux-gnu" && \ - TARGET_FLAG="--target=${TARGET}" && \ - export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-gcc"; \ - export RUSTFLAGS="-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ + echo "export FEATURE_OPENSSL_VENDORED='--features openssl-vendored'" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + fi + +# Run cook to prep the build +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + source ${BUILD_VAR_SCRIPT}; \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} + +# Now we can copy the source files +COPY . . + +# Get the latest Protoc since the one in the Debian repo is incredibly old +RUN apt update && apt install -y unzip curl ca-certificates && \ + PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ + if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ + PROTOC_ARCH=x86_64; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ + PROTOC_ARCH=aarch_64; \ + else \ + echo "${BUILDPLATFORM} is not supported."; \ + exit 1; \ + fi && \ + curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ + unzip -q protoc.zip bin/protoc -d /usr && \ + unzip -q protoc.zip "include/google/*" -d /usr && \ + chmod a+x /usr/bin/protoc && \ + rm -rf protoc.zip + +# Build the application +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + chmod +x ${BUILD_VAR_SCRIPT} && \ + . ${BUILD_VAR_SCRIPT}; \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ fi && \ - # Build the signer - general setup that works with or without cross-compilation - # cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ export GIT_HASH=$(git rev-parse HEAD) && \ cargo build ${TARGET_FLAG} --release --bin commit-boost-signer ${FEATURE_OPENSSL_VENDORED} && \ if [ ! 
-z "$TARGET" ]; then \ From c07c71784ee4c557f8fb778f9db2ef0b328624ae Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 16:36:57 -0400 Subject: [PATCH 07/52] Ported the cross-compilation stuff into PBS --- provisioning/pbs.Dockerfile | 112 ++++++++++++++++++++++++++++++--- provisioning/signer.Dockerfile | 22 ++++--- 2 files changed, 120 insertions(+), 14 deletions(-) diff --git a/provisioning/pbs.Dockerfile b/provisioning/pbs.Dockerfile index 200c95d2..cac14de0 100644 --- a/provisioning/pbs.Dockerfile +++ b/provisioning/pbs.Dockerfile @@ -1,22 +1,120 @@ -FROM lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +# This will be the main build image +FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED WORKDIR /app -FROM chef AS planner +FROM --platform=${BUILDPLATFORM} chef AS planner +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED COPY . . RUN cargo chef prepare --recipe-path recipe.json -FROM chef AS builder +FROM --platform=${BUILDPLATFORM} chef AS builder +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED +ENV BUILD_VAR_SCRIPT=/tmp/env.sh COPY --from=planner /app/recipe.json recipe.json -RUN cargo chef cook --release --recipe-path recipe.json +# Set up the build environment for cross-compilation if needed +RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ + # We're on x64, cross-compiling for arm64 + rustup target add aarch64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-aarch64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture arm64 && \ + apt update && \ + apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ + # We're on arm64, cross-compiling for x64 + rustup target add x86_64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-x86-64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture amd64 && \ + 
apt update && \ + apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + fi -RUN apt-get update && apt-get install -y protobuf-compiler +# Run cook to prep the build +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + . ${BUILD_VAR_SCRIPT} && \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} +# Now we can copy the source files - chef cook wants to run before this step COPY . . -RUN cargo build --release --bin commit-boost-pbs +# Get the latest Protoc since the one in the Debian repo is incredibly old +RUN apt update && apt install -y unzip curl ca-certificates && \ + PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ + if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ + PROTOC_ARCH=x86_64; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ + PROTOC_ARCH=aarch_64; \ + else \ + echo "${BUILDPLATFORM} is not supported."; \ + exit 1; \ + fi && \ + curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ + unzip -q protoc.zip bin/protoc -d /usr && \ + unzip -q protoc.zip "include/google/*" -d /usr && \ + chmod a+x /usr/bin/protoc && \ + rm -rf protoc.zip -FROM debian:bookworm-20240904-slim AS runtime +# Build the application +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + chmod +x ${BUILD_VAR_SCRIPT} && \ + . ${BUILD_VAR_SCRIPT} && \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo build ${TARGET_FLAG} --release --bin commit-boost-pbs ${FEATURE_OPENSSL_VENDORED} && \ + if [ ! 
-z "$TARGET" ]; then \ + # If we're cross-compiling, we need to move the binary out of the target dir + mv target/${TARGET}/release/commit-boost-pbs target/release/commit-boost-pbs; \ + fi + +# Assemble the runner image +FROM debian:bookworm-slim AS runtime WORKDIR /app RUN apt-get update && apt-get install -y \ diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 6de707f0..354afee0 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -35,8 +35,6 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - else \ - echo "export FEATURE_OPENSSL_VENDORED='--features openssl-vendored'" >> ${BUILD_VAR_SCRIPT}; \ fi; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ # We're on arm64, cross-compiling for x64 @@ -57,22 +55,26 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - else \ - echo "export FEATURE_OPENSSL_VENDORED='--features openssl-vendored'" >> ${BUILD_VAR_SCRIPT}; \ fi; \ fi # Run cook to prep the build RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - source ${BUILD_VAR_SCRIPT}; \ + . ${BUILD_VAR_SCRIPT} && \ echo "Cross-compilation environment set up for ${TARGET}"; \ else \ echo "No cross-compilation needed"; \ fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ export GIT_HASH=$(git rev-parse HEAD) && \ cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} -# Now we can copy the source files +# Now we can copy the source files - chef cook wants to run before this step COPY . . # Get the latest Protoc since the one in the Debian repo is incredibly old @@ -95,11 +97,17 @@ RUN apt update && apt install -y unzip curl ca-certificates && \ # Build the application RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ chmod +x ${BUILD_VAR_SCRIPT} && \ - . ${BUILD_VAR_SCRIPT}; \ + . ${BUILD_VAR_SCRIPT} && \ echo "Cross-compilation environment set up for ${TARGET}"; \ else \ echo "No cross-compilation needed"; \ fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ export GIT_HASH=$(git rev-parse HEAD) && \ cargo build ${TARGET_FLAG} --release --bin commit-boost-signer ${FEATURE_OPENSSL_VENDORED} && \ if [ ! 
-z "$TARGET" ]; then \ From 699b7ec9eeb4fe2c5d1398095047b82df81afc26 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 7 May 2025 13:52:08 -0400 Subject: [PATCH 08/52] Split the dockerfiles into separate builder / image definitions --- .gitignore | 3 +- build-linux.sh | 144 +++++++++++++++++++++++++++++++++ provisioning/build.Dockerfile | 120 +++++++++++++++++++++++++++ provisioning/cli.Dockerfile | 0 provisioning/pbs.Dockerfile | 137 +++---------------------------- provisioning/signer.Dockerfile | 6 +- 6 files changed, 277 insertions(+), 133 deletions(-) create mode 100755 build-linux.sh create mode 100644 provisioning/build.Dockerfile create mode 100644 provisioning/cli.Dockerfile diff --git a/.gitignore b/.gitignore index b8eaa77a..e48792b4 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ # will have compiled files and executables debug/ target/ +build/ # These are backup files generated by rustfmt **/*.rs.bk @@ -14,4 +15,4 @@ cb.docker-compose.yml targets.json .idea/ logs -.vscode/ \ No newline at end of file +.vscode/ diff --git a/build-linux.sh b/build-linux.sh new file mode 100755 index 00000000..a7266bd9 --- /dev/null +++ b/build-linux.sh @@ -0,0 +1,144 @@ +#!/bin/bash + +# This script will build the Commit-Boost applications and modules for local Linux development. + +# ================= +# === Functions === +# ================= + +# Print a failure message to stderr and exit +fail() { + MESSAGE=$1 + RED='\033[0;31m' + RESET='\033[;0m' + >&2 echo -e "\n${RED}**ERROR**\n$MESSAGE${RESET}\n" + exit 1 +} + + +# Builds the CLI binaries for Linux +# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static +build_cli() { + echo "Building CLI binaries..." + docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-cli . || fail "Error building CLI." + echo "done!" + + # Flatten the folder structure for easier referencing + mv build/$VERSION/linux_amd64/commit-boost-cli build/$VERSION/commit-boost-cli-linux-amd64 + mv build/$VERSION/linux_arm64/commit-boost-cli build/$VERSION/commit-boost-cli-linux-arm64 + + # Clean up the empty directories + rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 + echo "done!" +} + + +# Builds the PBS module binaries for Linux and the Docker image(s) +# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static +build_pbs() { + echo "Building PBS binaries..." + docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-pbs . || fail "Error building PBS binaries." + echo "done!" + + # Flatten the folder structure for easier referencing + mv build/$VERSION/linux_amd64/commit-boost-pbs build/$VERSION/commit-boost-pbs-linux-amd64 + mv build/$VERSION/linux_arm64/commit-boost-pbs build/$VERSION/commit-boost-pbs-linux-arm64 + + # Clean up the empty directories + rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 + + echo "Building PBS Docker image..." + # If uploading, make and push a manifest + if [ "$LOCAL_UPLOAD" = true ]; then + if [ -z "$LOCAL_DOCKER_REGISTRY" ]; then + fail "LOCAL_DOCKER_REGISTRY must be set to upload to a local registry." + fi + docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/$VERSION -t $LOCAL_DOCKER_REGISTRY/commit-boost/pbs:$VERSION -f provisioning/pbs.Dockerfile --push . 
|| fail "Error building PBS image." + else + docker buildx build --rm --load --build-arg BINARIES_PATH=build/$VERSION -t commit-boost/pbs:$VERSION -f provisioning/pbs.Dockerfile . || fail "Error building PBS image." + fi + echo "done!" +} + + +# Builds the Signer module binaries for Linux and the Docker image(s) +# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static +build_signer() { + echo "Building Signer binaries..." + docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-signer . || fail "Error building Signer binaries." + echo "done!" + + # Flatten the folder structure for easier referencing + mv build/$VERSION/linux_amd64/commit-boost-signer build/$VERSION/commit-boost-signer-linux-amd64 + mv build/$VERSION/linux_arm64/commit-boost-signer build/$VERSION/commit-boost-signer-linux-arm64 + + # Clean up the empty directories + rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 + + echo "Building Signer Docker image..." + # If uploading, make and push a manifest + if [ "$LOCAL_UPLOAD" = true ]; then + if [ -z "$LOCAL_DOCKER_REGISTRY" ]; then + fail "LOCAL_DOCKER_REGISTRY must be set to upload to a local registry." + fi + docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/$VERSION -t $LOCAL_DOCKER_REGISTRY/commit-boost/signer:$VERSION -f provisioning/signer.Dockerfile --push . || fail "Error building Signer image." + else + docker buildx build --rm --load --build-arg BINARIES_PATH=build/$VERSION -t commit-boost/signer:$VERSION -f provisioning/signer.Dockerfile . || fail "Error building Signer image." + fi + echo "done!" +} + + +# Print usage +usage() { + echo "Usage: build.sh [options] -v " + echo "This script assumes it is in the commit-boost-client repository directory." 
+ echo "Options:" + echo $'\t-a\tBuild all of the artifacts (CLI, PBS, and Signer, along with Docker images)' + echo $'\t-c\tBuild the Commit-Boost CLI binaries' + echo $'\t-p\tBuild the PBS module binary and its Docker container' + echo $'\t-s\tBuild the Signer module binary and its Docker container' + echo $'\t-o\tWhen passed with a build, upload the resulting image tags to a local Docker registry specified in $LOCAL_DOCKER_REGISTRY' + exit 0 +} + + +# ================= +# === Main Body === +# ================= + +# Parse arguments +while getopts "acpsov:" FLAG; do + case "$FLAG" in + a) CLI=true PBS=true SIGNER=true ;; + c) CLI=true ;; + p) PBS=true ;; + s) SIGNER=true ;; + o) LOCAL_UPLOAD=true ;; + v) VERSION="$OPTARG" ;; + *) usage ;; + esac +done +if [ -z "$VERSION" ]; then + usage +fi + +# Cleanup old artifacts +rm -rf build/$VERSION/* +mkdir -p build/$VERSION + +# Make a multiarch builder, ignore if it's already there +docker buildx create --name multiarch-builder --driver docker-container --use > /dev/null 2>&1 +# NOTE: if using a local repo with a private CA, you will have to follow these steps to add the CA to the builder: +# https://stackoverflow.com/a/73585243 + +# Build the artifacts +if [ "$CLI" = true ]; then + build_cli +fi +if [ "$PBS" = true ]; then + build_pbs +fi +if [ "$SIGNER" = true ]; then + build_signer +fi diff --git a/provisioning/build.Dockerfile b/provisioning/build.Dockerfile new file mode 100644 index 00000000..83679ed5 --- /dev/null +++ b/provisioning/build.Dockerfile @@ -0,0 +1,120 @@ +# This will be the main build image +FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +WORKDIR /app + +FROM --platform=${BUILDPLATFORM} chef AS planner +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +COPY . . 
+RUN cargo chef prepare --recipe-path recipe.json + +FROM --platform=${BUILDPLATFORM} chef AS builder +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +RUN test -n "$TARGET_CRATE" || (echo "TARGET_CRATE must be set to the service / binary you want to build" && false) +ENV BUILD_VAR_SCRIPT=/tmp/env.sh +COPY --from=planner /app/recipe.json recipe.json + +# Get the latest Protoc since the one in the Debian repo is incredibly old +RUN apt update && apt install -y unzip curl ca-certificates && \ + PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ + if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ + PROTOC_ARCH=x86_64; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ + PROTOC_ARCH=aarch_64; \ + else \ + echo "${BUILDPLATFORM} is not supported."; \ + exit 1; \ + fi && \ + curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ + unzip -q protoc.zip bin/protoc -d /usr && \ + unzip -q protoc.zip "include/google/*" -d /usr && \ + chmod a+x /usr/bin/protoc && \ + rm -rf protoc.zip + +# Set up the build environment for cross-compilation if needed +RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ + # We're on x64, cross-compiling for arm64 + rustup target add aarch64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-aarch64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture arm64 && \ + apt update && \ + apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ + # We're on arm64, cross-compiling for x64 + rustup target add x86_64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-x86-64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture amd64 && \ + apt update && \ + apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ + echo "export 
PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + fi + +# Run cook to prep the build +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + . ${BUILD_VAR_SCRIPT} && \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} + +# Now we can copy the source files - chef cook wants to run before this step +COPY . . + +# Build the application +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + chmod +x ${BUILD_VAR_SCRIPT} && \ + . ${BUILD_VAR_SCRIPT} && \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo build ${TARGET_FLAG} --release --bin ${TARGET_CRATE} ${FEATURE_OPENSSL_VENDORED} && \ + if [ ! -z "$TARGET" ]; then \ + # If we're cross-compiling, we need to move the binary out of the target dir + mv target/${TARGET}/release/${TARGET_CRATE} target/release/${TARGET_CRATE}; \ + fi + +# Copy the output +FROM scratch AS output +ARG TARGET_CRATE +COPY --from=builder /app/target/release/${TARGET_CRATE} /${TARGET_CRATE} diff --git a/provisioning/cli.Dockerfile b/provisioning/cli.Dockerfile new file mode 100644 index 00000000..e69de29b diff --git a/provisioning/pbs.Dockerfile b/provisioning/pbs.Dockerfile index cac14de0..9eff5890 100644 --- a/provisioning/pbs.Dockerfile +++ b/provisioning/pbs.Dockerfile @@ -1,138 +1,19 @@ -# This will be the main build image -FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -WORKDIR /app - -FROM --platform=${BUILDPLATFORM} chef AS planner -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -COPY . . 
-RUN cargo chef prepare --recipe-path recipe.json - -FROM --platform=${BUILDPLATFORM} chef AS builder -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -ENV BUILD_VAR_SCRIPT=/tmp/env.sh -COPY --from=planner /app/recipe.json recipe.json - -# Set up the build environment for cross-compilation if needed -RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ - # We're on x64, cross-compiling for arm64 - rustup target add aarch64-unknown-linux-gnu && \ - apt update && \ - apt install -y gcc-aarch64-linux-gnu && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ - echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture arm64 && \ - apt update && \ - apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ - # We're on arm64, cross-compiling for x64 - rustup target add x86_64-unknown-linux-gnu && \ - apt update && \ - apt install -y gcc-x86-64-linux-gnu && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ - echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture amd64 && \ - apt update && \ - apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ - fi - -# Run cook to prep the build -RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - . 
${BUILD_VAR_SCRIPT} && \ - echo "Cross-compilation environment set up for ${TARGET}"; \ - else \ - echo "No cross-compilation needed"; \ - fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ - cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} - -# Now we can copy the source files - chef cook wants to run before this step -COPY . . - -# Get the latest Protoc since the one in the Debian repo is incredibly old -RUN apt update && apt install -y unzip curl ca-certificates && \ - PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ - if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ - PROTOC_ARCH=x86_64; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ - PROTOC_ARCH=aarch_64; \ - else \ - echo "${BUILDPLATFORM} is not supported."; \ - exit 1; \ - fi && \ - curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ - unzip -q protoc.zip bin/protoc -d /usr && \ - unzip -q protoc.zip "include/google/*" -d /usr && \ - chmod a+x /usr/bin/protoc && \ - rm -rf protoc.zip - -# Build the application -RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - chmod +x ${BUILD_VAR_SCRIPT} && \ - . ${BUILD_VAR_SCRIPT} && \ - echo "Cross-compilation environment set up for ${TARGET}"; \ - else \ - echo "No cross-compilation needed"; \ - fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ - cargo build ${TARGET_FLAG} --release --bin commit-boost-pbs ${FEATURE_OPENSSL_VENDORED} && \ - if [ ! 
-z "$TARGET" ]; then \ - # If we're cross-compiling, we need to move the binary out of the target dir - mv target/${TARGET}/release/commit-boost-pbs target/release/commit-boost-pbs; \ - fi - -# Assemble the runner image -FROM debian:bookworm-slim AS runtime -WORKDIR /app - +FROM debian:bookworm-slim +ARG BINARIES_PATH TARGETOS TARGETARCH +COPY ${BINARIES_PATH}/commit-boost-pbs-${TARGETOS}-${TARGETARCH} /usr/local/bin RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ libssl3 \ libssl-dev \ - curl \ - && apt-get clean autoclean \ - && rm -rf /var/lib/apt/lists/* - -COPY --from=builder /app/target/release/commit-boost-pbs /usr/local/bin + curl && \ + # Cleanup + apt-get clean autoclean && \ + rm -rf /var/lib/apt/lists/* +# Create a non-root user to run the application RUN groupadd -g 10001 commitboost && \ useradd -u 10001 -g commitboost -s /sbin/nologin commitboost USER commitboost -ENTRYPOINT ["/usr/local/bin/commit-boost-pbs"] - - - +ENTRYPOINT ["/usr/local/bin/commit-boost-pbs"] \ No newline at end of file diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 354afee0..6c5ac045 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -115,6 +115,7 @@ RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ mv target/${TARGET}/release/commit-boost-signer target/release/commit-boost-signer; \ fi +# Assemble the runner image FROM debian:bookworm-slim AS runtime WORKDIR /app @@ -133,7 +134,4 @@ RUN groupadd -g 10001 commitboost && \ useradd -u 10001 -g commitboost -s /sbin/nologin commitboost USER commitboost -ENTRYPOINT ["/usr/local/bin/commit-boost-signer"] - - - +ENTRYPOINT ["/usr/local/bin/commit-boost-signer"] \ No newline at end of file From 7165f129ae7a299b69649c7904ef3b30787ee86e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 7 May 2025 17:48:42 -0400 Subject: [PATCH 09/52] Added a build guide --- docs/docs/get_started/building.md | 185 ++++++++++++++++++++++++++++++ 1 file changed, 185 insertions(+) create mode 100644 docs/docs/get_started/building.md diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md new file mode 100644 index 00000000..d38b447f --- /dev/null +++ b/docs/docs/get_started/building.md @@ -0,0 +1,185 @@ +# Building Commit-Boost from Source + +Commit-Boost's components are all written in [Rust](https://www.rust-lang.org/). This guide will walk you through the setup required to build them from source. It assumes you are on a Debian or Debian-based system (e.g., Ubuntu, Linux Mint, Pop OS). For other systems, please adapt the steps for your system's package manager accordingly. + + +## Building via the Docker Builder + +For convenience, Commit-Boost has Dockerized the build environment for Linux `x64` and `arm64` platforms. All of the prerequisites, cross-compilation tooling, and configuration are handled by the builder image. If you would like to build the CLI, PBS module, or Signer binaries and Docker images from source, you are welcome to use the Docker builder process. + +To use the builder, you will need to have [Docker Engine](https://docs.docker.com/engine/install/) installed on your system. Please follow the instructions to install it first. + +:::note +The build script assumes that you've added your user account to the `docker` group with the Linux [post-install steps](https://docs.docker.com/engine/install/linux-postinstall/). 
If you haven't, then you'll need to run the build script below as `root` or modify it so each call to `docker` within it is run as the root user (e.g., with `sudo`).
+:::
+
+We provide a build script called `build-linux.sh` to automate the process:
+
+```
+$ ./build-linux.sh
+Usage: build-linux.sh [options] -v <version>
+This script assumes it is in the commit-boost-client repository directory.
+Options:
+  -a  Build all of the artifacts (CLI, PBS, and Signer, along with Docker images)
+  -c  Build the Commit-Boost CLI binaries
+  -p  Build the PBS module binary and its Docker container
+  -s  Build the Signer module binary and its Docker container
+  -o  When passed with a build, upload the resulting image tags to a local Docker registry specified in $LOCAL_DOCKER_REGISTRY
+```
+
+The script uses Docker's [buildx](https://docs.docker.com/reference/cli/docker/buildx/) system to create a multiarch-capable builder and cross-compile for both Linux architectures. You are free to modify it to produce only the artifacts relevant to you.
+
+The `version` provided will be used to house the output binaries in `./build/$VERSION`, and will act as the version tag for the Docker images when they're added to your local system or uploaded to your local Docker registry.
+
+
+## Building Manually
+
+If you don't want to use the Docker builder, you can compile the Commit-Boost artifacts locally. The following instructions assume a Debian or Debian-based system (e.g., Ubuntu, Linux Mint, Pop OS) for simplicity. For other systems, please adapt any relevant instructions to your environment accordingly.
+
+
+### Prerequisites
+
+Requirements:
+
+- Rust 1.83+
+- GCC (or another C compiler of your choice)
+- OpenSSL development libraries
+- Protobuf Compiler (`protoc`)
+
+Start by installing Rust if you don't already have it. Follow [the official directions](https://www.rust-lang.org/learn/get-started) to install it and bring it up to date.
+
+Install the dependencies:
+
+```bash
+sudo apt update && sudo apt install -y openssl ca-certificates libssl3 libssl-dev build-essential pkg-config curl
+```
+
+Install the Protobuf compiler:
+
+:::note
+While many package repositories provide a `protobuf-compiler` package in lieu of manually installing protoc, we've found at the time of this writing that most of them ship v3.21, which is quite out of date. We recommend getting the latest version manually.
+:::
+
+```bash
+PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+')
+MACHINE_ARCH=$(uname -m)
+case "${MACHINE_ARCH}" in
+    aarch64) PROTOC_ARCH=aarch_64;;
+    x86_64) PROTOC_ARCH=x86_64;;
+    *) echo "${MACHINE_ARCH} is not supported."; exit 1;;
+esac
+curl -sLo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip
+sudo unzip -q protoc.zip bin/protoc -d /usr
+sudo unzip -q protoc.zip "include/google/*" -d /usr
+sudo chmod a+x /usr/bin/protoc
+rm -rf protoc.zip
+```
+
+With the prerequisites set up, pull the repository:
+```bash
+git clone https://github.com/Commit-Boost/commit-boost-client
+```
+
+Check out the `stable` branch, which houses the latest release:
+```bash
+cd commit-boost-client && git checkout stable
+```
+
+Finally, update the submodules:
+```bash
+git submodule update --init --recursive
+```
+
+Your build environment should now be ready to use.
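+
+As an optional sanity check before building, you can confirm that each piece of the toolchain is visible from your shell. This is just a suggested spot-check, and exact version numbers will vary:
+
+```bash
+rustc --version                    # should report 1.83 or newer
+cc --version                       # any working C compiler is fine
+pkg-config --modversion openssl    # confirms the OpenSSL development files are discoverable
+protoc --version                   # should report the freshly installed release, not the distro's v3.21
+```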
+ + +### Building the CLI + +To build the CLI, run: +``` +cargo build --release --bin commit-boost-cli +``` + +This will create a binary in `./target/release/commit-boost-cli`. Confirm that it works: +``` +./target/release/commit-boost-cli --version +``` + +You can now use this to generate the Docker Compose file to drive the other modules if desired. See the [configuration](./configuration.md) guide for more information. + + +### Building the PBS Module + +To build PBS, run: +``` +cargo build --release --bin commit-boost-pbs +``` + +This will create a binary in `./target/release/commit-boost-pbs`. To verify it works, create [a TOML configuration](./configuration.md) for the PBS module (e.g., `cb-config.toml`). + +As a quick example, we'll use this configuration that connects to the Flashbots relay on the Hoodi network: +```toml +chain = "Hoodi" + +[pbs] +port = 18550 +with_signer = true + +[[relays]] +url = "https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-hoodi.flashbots.net" + +[metrics] +enabled = true + +[signer] +[signer.local.loader] +format = "lighthouse" +keys_path = "/tmp/keys" +secrets_path = "/tmp/secrets" +``` + +Set the path to it in the `CB_CONFIG` environment variable and run the binary: +``` +CB_CONFIG=cb-config.toml ./target/release/commit-boost-pbs +``` + +If it works, you should see output like this: +``` +2025-05-07T21:09:17.407245Z WARN No metrics server configured +2025-05-07T21:09:17.407257Z INFO starting PBS service version="0.7.0" commit_hash="58082edb1213596667afe8c3950cd997ab85f4f3" addr=127.0.0.1:18550 events_subs=0 chain=Hoodi +2025-05-07T21:09:17.746855Z INFO : new request ua="" relay_check=true method=/eth/v1/builder/status req_id=5c405c33-0496-42ea-a35d-a7a01dbba356 +2025-05-07T21:09:17.896196Z INFO : relay check successful method=/eth/v1/builder/status req_id=5c405c33-0496-42ea-a35d-a7a01dbba356 +``` + +If you do, then the binary works. + + +### Building the Signer Module + +To build the Signer, run: +``` +cargo build --release --bin commit-boost-signer +``` + +This will create a binary in `./target/release/commit-boost-signer`. To verify it works, create [a TOML configuration](./configuration.md) for the Signer module (e.g., `cb-config.toml`). We'll use the example in the PBS build section above. + +The signer needs the following environment variables set: +- `CB_CONFIG` = path of your config file. +- `CB_JWTS` = a dummy key-value pair of [JWT](https://en.wikipedia.org/wiki/JSON_Web_Token) values for various services. Since we don't need them for the sake of just testing the binary, we can use something like `"test_jwts=dummy"`. +- `CB_SIGNER_PORT` = the network port to listen for signer requests on. Default is `20000`. + +Set these values, create the `keys` and `secrets` directories listed in the configuration file, and run the binary: + +``` +mkdir -p /tmp/keys && mkdir -p /tmp/secrets +CB_CONFIG=cb-config.toml CB_JWTS="test_jwts=dummy" CB_SIGNER_PORT=20000 ./target/release/commit-boost-signer +``` + +You should see output like this: +``` +2025-05-07T21:43:46.385535Z WARN Proxy store not configured. Proxies keys and delegations will not be persisted +2025-05-07T21:43:46.393507Z INFO Starting signing service version="0.7.0" commit_hash="58082edb1213596667afe8c3950cd997ab85f4f3" modules=["test_jwts"] port=20000 loaded_consensus=0 loaded_proxies=0 +2025-05-07T21:43:46.393574Z WARN No metrics server configured +``` + +If you do, then the binary works. 
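+
+As a final optional check, you can confirm from a second terminal that the signer is actually listening; this assumes the default port of `20000` used above:
+
+```
+ss -tlnp | grep 20000
+```
+
+If the port appears in the list of listening sockets, the service is up and ready to accept module requests.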
\ No newline at end of file From 9438dae97bbb5d13032519c34ca9ad4e7c468137 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 13 May 2025 02:25:24 -0400 Subject: [PATCH 10/52] Refactored the Github release action to use the Docker builder --- .github/workflows/release.yml | 159 ++++++++++++++++++++++++++------- provisioning/build.Dockerfile | 21 +---- provisioning/cli.Dockerfile | 0 provisioning/protoc.sh | 57 ++++++++++++ provisioning/signer.Dockerfile | 134 ++------------------------- 5 files changed, 194 insertions(+), 177 deletions(-) delete mode 100644 provisioning/cli.Dockerfile create mode 100755 provisioning/protoc.sh diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 906c01f3..40745fbb 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,11 +10,86 @@ permissions: packages: write jobs: - build-binaries: + # Builds the x64 and arm64 binaries for Linux, for all 3 crates, via the Docker builder + build-binaries-linux: strategy: matrix: target: - - x86_64-unknown-linux-gnu + - amd64 + - arm64 + name: + - commit-boost-cli + - commit-boost-pbs + - commit-boost-signer + include: + - target: amd64 + package-suffix: x86-64 + - target: arm64 + package-suffix: arm64 + - name: commit-boost-cli + target-crate: cli + - name: commit-boost-pbs + target-crate: pbs + - name: commit-boost-signer + target-crate: signer + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: "stable" + fetch-depth: 0 + submodules: true + + - name: Log commit hash + run: | + echo "Releasing commit: $(git rev-parse HEAD)" + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build binary (Linux) + uses: docker/build-push-action@v6 + with: + context: . 
+ push: false + platforms: linux/amd64,linux/arm64 + cache-from: type=registry,ref=ghcr.io/commit-boost/buildcache:${{ matrix.target-crate}} + cache-to: type=registry,ref=ghcr.io/commit-boost/buildcache:${{ matrix.target-crate }},mode=max + file: provisioning/build.Dockerfile + outputs: type=local,dest=build + build-args: | + TARGET_CRATE=${{ matrix.name }} + + - name: Package binary (Linux) + run: | + cd build/linux_${{ matrix.target }} + tar -czvf ${{ matrix.name }}-${{ github.ref_name }}-linux_${{ matrix.package-suffix }}.tar.gz ${{ matrix.name }} + mv ${{ matrix.name }}-${{ github.ref_name }}-linux_${{ matrix.package-suffix }}.tar.gz ../../ + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.name }}-${{ github.ref_name }}-linux_${{ matrix.package-suffix }} + path: | + ${{ matrix.name }}-${{ github.ref_name }}-linux_${{ matrix.package-suffix }}.tar.gz + + # Builds the arm64 binaries for Darwin, for all 3 crates, natively + build-binaries-darwin: + strategy: + matrix: + target: + # x64 requires macos-latest-large which is not available in the free tier # - x86_64-apple-darwin - aarch64-apple-darwin name: @@ -22,10 +97,8 @@ jobs: - commit-boost-pbs - commit-boost-signer include: - - target: x86_64-unknown-linux-gnu - os: ubuntu-latest # - target: x86_64-apple-darwin - # os: macos-latest + # os: macos-latest-large - target: aarch64-apple-darwin os: macos-latest runs-on: ${{ matrix.os }} @@ -41,6 +114,12 @@ jobs: run: | echo "Releasing commit: $(git rev-parse HEAD)" + - name: Install Protoc + run: + # Brew's version is much more up to date than the Linux ones, and installing the latest via script runs into curl issues so for now, brew's easier to use + # provisioning/protoc.sh + brew install protobuf + - name: Cache Cargo registry uses: actions/cache@v3 with: @@ -63,48 +142,25 @@ jobs: ${{ runner.os }}-cargo-build-${{ matrix.target }}- ${{ runner.os }}-cargo-build- - - name: Install protoc (Ubuntu) - if: runner.os == 'Linux' - run: sudo apt-get install protobuf-compiler - - - name: Install protoc (macOS) - if: runner.os == 'macOS' - run: brew install protobuf - - - name: Set up Rust - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - override: true - target: ${{ matrix.target }} - - - name: Build binary + - name: Build binary (Darwin) run: cargo build --release --target ${{ matrix.target }} --bin ${{ matrix.name }} - env: - CARGO_TARGET_X86_64_PC_WINDOWS_GNU_LINKER: gcc - name: Package binary (Unix) - if: runner.os != 'Windows' run: | cd target/${{ matrix.target }}/release tar -czvf ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz ${{ matrix.name }} mv ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz ../../../ - - name: Package binary (Windows) - if: runner.os == 'Windows' - run: | - 7z a ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.zip target\${{ matrix.target }}\release\${{ matrix.name }}.exe - - name: Upload artifact uses: actions/upload-artifact@v4 with: name: ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }} path: | - ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.${{ runner.os == 'Windows' && 'zip' || 'tar.gz' }} + ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz + # Builds the PBS Docker image build-and-push-pbs-docker: - needs: [build-binaries] + needs: [build-binaries-linux] runs-on: ubuntu-latest steps: - name: Checkout code @@ -114,6 +170,20 @@ jobs: fetch-depth: 0 submodules: true + - name: Download 
binary archives + uses: actions/download-artifact@v4 + with: + path: ./artifacts + pattern: "commit-boost-*" + + - name: Extract binaries + run: | + mkdir -p ./artifacts/bin + tar -xzf ./artifacts/commit-boost-pbs-${{ github.ref_name }}-linux_x86-64/commit-boost-pbs-${{ github.ref_name }}-linux_x86-64.tar.gz -C ./artifacts/bin + mv ./artifacts/bin/commit-boost-pbs ./artifacts/bin/commit-boost-pbs-linux-amd64 + tar -xzf ./artifacts/commit-boost-pbs-${{ github.ref_name }}-linux_arm64/commit-boost-pbs-${{ github.ref_name }}-linux_arm64.tar.gz -C ./artifacts/bin + mv ./artifacts/bin/commit-boost-pbs ./artifacts/bin/commit-boost-pbs-linux-arm64 + - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -133,6 +203,8 @@ jobs: context: . push: true platforms: linux/amd64,linux/arm64 + build-args: | + BINARIES_PATH=./artifacts/bin tags: | ghcr.io/commit-boost/pbs:${{ github.ref_name }} ${{ !contains(github.ref_name, 'rc') && 'ghcr.io/commit-boost/pbs:latest' || '' }} @@ -140,8 +212,9 @@ jobs: cache-to: type=registry,ref=ghcr.io/commit-boost/pbs:buildcache,mode=max file: provisioning/pbs.Dockerfile + # Builds the Signer Docker image build-and-push-signer-docker: - needs: [build-binaries] + needs: [build-binaries-linux] runs-on: ubuntu-latest steps: - name: Checkout code @@ -151,6 +224,20 @@ jobs: fetch-depth: 0 submodules: true + - name: Download binary archives + uses: actions/download-artifact@v4 + with: + path: ./artifacts + pattern: "commit-boost-*" + + - name: Extract binaries + run: | + mkdir -p ./artifacts/bin + tar -xzf ./artifacts/commit-boost-signer-${{ github.ref_name }}-linux_x86-64/commit-boost-signer-${{ github.ref_name }}-linux_x86-64.tar.gz -C ./artifacts/bin + mv ./artifacts/bin/commit-boost-signer ./artifacts/bin/commit-boost-signer-linux-amd64 + tar -xzf ./artifacts/commit-boost-signer-${{ github.ref_name }}-linux_arm64/commit-boost-signer-${{ github.ref_name }}-linux_arm64.tar.gz -C ./artifacts/bin + mv ./artifacts/bin/commit-boost-signer ./artifacts/bin/commit-boost-signer-linux-arm64 + - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -170,6 +257,8 @@ jobs: context: . 
push: true platforms: linux/amd64,linux/arm64 + build-args: | + BINARIES_PATH=./artifacts/bin tags: | ghcr.io/commit-boost/signer:${{ github.ref_name }} ${{ !contains(github.ref_name, 'rc') && 'ghcr.io/commit-boost/signer:latest' || '' }} @@ -177,9 +266,11 @@ jobs: cache-to: type=registry,ref=ghcr.io/commit-boost/signer:buildcache,mode=max file: provisioning/signer.Dockerfile + # Creates a draft release on GitHub with the binaries finalize-release: needs: - - build-binaries + - build-binaries-linux + - build-binaries-darwin - build-and-push-pbs-docker - build-and-push-signer-docker runs-on: ubuntu-latest diff --git a/provisioning/build.Dockerfile b/provisioning/build.Dockerfile index 83679ed5..a4eb3723 100644 --- a/provisioning/build.Dockerfile +++ b/provisioning/build.Dockerfile @@ -14,23 +14,6 @@ RUN test -n "$TARGET_CRATE" || (echo "TARGET_CRATE must be set to the service / ENV BUILD_VAR_SCRIPT=/tmp/env.sh COPY --from=planner /app/recipe.json recipe.json -# Get the latest Protoc since the one in the Debian repo is incredibly old -RUN apt update && apt install -y unzip curl ca-certificates && \ - PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ - if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ - PROTOC_ARCH=x86_64; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ - PROTOC_ARCH=aarch_64; \ - else \ - echo "${BUILDPLATFORM} is not supported."; \ - exit 1; \ - fi && \ - curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ - unzip -q protoc.zip bin/protoc -d /usr && \ - unzip -q protoc.zip "include/google/*" -d /usr && \ - chmod a+x /usr/bin/protoc && \ - rm -rf protoc.zip - # Set up the build environment for cross-compilation if needed RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # We're on x64, cross-compiling for arm64 @@ -90,6 +73,10 @@ RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ export GIT_HASH=$(git rev-parse HEAD) && \ cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} +# Get the latest Protoc since the one in the Debian repo is incredibly old +COPY provisioning/protoc.sh provisioning/protoc.sh +RUN provisioning/protoc.sh + # Now we can copy the source files - chef cook wants to run before this step COPY . . diff --git a/provisioning/cli.Dockerfile b/provisioning/cli.Dockerfile deleted file mode 100644 index e69de29b..00000000 diff --git a/provisioning/protoc.sh b/provisioning/protoc.sh new file mode 100755 index 00000000..7f66a656 --- /dev/null +++ b/provisioning/protoc.sh @@ -0,0 +1,57 @@ +#!/bin/sh + +# This script installs the latest version of protoc (Protocol Buffers Compiler) from the official GitHub repository. 
+
+# Print a failure message to stderr and exit
+fail() {
+    MESSAGE=$1
+    RED='\033[0;31m'
+    RESET='\033[0m'
+    # printf is used instead of `echo -e` because /bin/sh may be dash, which has no `echo -e`
+    >&2 printf "\n${RED}**ERROR**\n%s${RESET}\n\n" "$MESSAGE"
+    exit 1
+}
+
+# Get the OS
+case "$(uname)" in
+    Darwin*)
+        PROTOC_OS="osx" ;
+        TARGET_DIR="/opt/homebrew" ; # Emulating a homebrew install so we don't need elevated permissions
+        # Darwin comes with unzip and curl already
+        brew install jq ;;
+    Linux*)
+        PROTOC_OS="linux" ;
+        TARGET_DIR="/usr" ; # Assumes the script is run as root or the user can do it manually
+        apt update && apt install -y unzip curl ca-certificates jq ;;
+    *)
+        echo "Unsupported OS: $(uname)" ;
+        exit 1 ;;
+esac
+
+# Get the architecture
+case "$(uname -m)" in
+    x86_64) PROTOC_ARCH="x86_64" ;;
+    aarch64) PROTOC_ARCH="aarch_64" ;;
+    arm64) PROTOC_ARCH="aarch_64" ;;
+    *) echo "Unsupported architecture: [$(uname -m)]"; exit 1 ;;
+esac
+
+# Get the latest version
+PROTOC_RAW_VERSION=$(curl --retry 10 --retry-delay 2 --retry-all-errors -fsL "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | jq -r .tag_name) || fail "Failed to get the latest version of protoc"
+if [ "$PROTOC_RAW_VERSION" = "null" ]; then
+    fail "Failed to get the latest version of protoc"
+fi
+echo "Latest version of protoc: [$PROTOC_RAW_VERSION]"
+PROTOC_VERSION=$(echo $PROTOC_RAW_VERSION | sed 's/^v//') || fail "Failed to parse the latest version of protoc"
+if [ -z "$PROTOC_VERSION" ]; then
+    fail "Latest version of protoc was empty"
+fi
+
+echo "Installing protoc: $PROTOC_VERSION-$PROTOC_OS-$PROTOC_ARCH"
+
+# Download and install protoc
+curl --retry 10 --retry-delay 2 --retry-all-errors -fsLo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-$PROTOC_OS-$PROTOC_ARCH.zip || fail "Failed to download protoc"
+unzip -q protoc.zip bin/protoc -d $TARGET_DIR || fail "Failed to unzip protoc"
+unzip -q protoc.zip "include/google/*" -d $TARGET_DIR || fail "Failed to unzip protoc includes"
+chmod a+x $TARGET_DIR/bin/protoc || fail "Failed to set executable permissions for protoc"
+rm -rf protoc.zip || fail "Failed to remove protoc zip file"
+echo "protoc ${PROTOC_VERSION} installed successfully for ${PROTOC_OS} ${PROTOC_ARCH}"
\ No newline at end of file
diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile
index 6c5ac045..f9824e7a 100644
--- a/provisioning/signer.Dockerfile
+++ b/provisioning/signer.Dockerfile
@@ -1,135 +1,17 @@
-# This will be the main build image
-FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef
-ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED
-WORKDIR /app
-
-# Planner stage
-FROM --platform=${BUILDPLATFORM} chef AS planner
-ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED
-COPY . .
-RUN cargo chef prepare --recipe-path recipe.json - -# Builder stage -FROM --platform=${BUILDPLATFORM} chef AS builder -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -ENV BUILD_VAR_SCRIPT=/tmp/env.sh -COPY --from=planner /app/recipe.json recipe.json - -# Set up the build environment for cross-compilation if needed -RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ - # We're on x64, cross-compiling for arm64 - rustup target add aarch64-unknown-linux-gnu && \ - apt update && \ - apt install -y gcc-aarch64-linux-gnu && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ - echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture arm64 && \ - apt update && \ - apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ - # We're on arm64, cross-compiling for x64 - rustup target add x86_64-unknown-linux-gnu && \ - apt update && \ - apt install -y gcc-x86-64-linux-gnu && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ - echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture amd64 && \ - apt update && \ - apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ - fi - -# Run cook to prep the build -RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - . 
${BUILD_VAR_SCRIPT} && \ - echo "Cross-compilation environment set up for ${TARGET}"; \ - else \ - echo "No cross-compilation needed"; \ - fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ - cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} - -# Now we can copy the source files - chef cook wants to run before this step -COPY . . - -# Get the latest Protoc since the one in the Debian repo is incredibly old -RUN apt update && apt install -y unzip curl ca-certificates && \ - PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ - if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ - PROTOC_ARCH=x86_64; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ - PROTOC_ARCH=aarch_64; \ - else \ - echo "${BUILDPLATFORM} is not supported."; \ - exit 1; \ - fi && \ - curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ - unzip -q protoc.zip bin/protoc -d /usr && \ - unzip -q protoc.zip "include/google/*" -d /usr && \ - chmod a+x /usr/bin/protoc && \ - rm -rf protoc.zip - -# Build the application -RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - chmod +x ${BUILD_VAR_SCRIPT} && \ - . ${BUILD_VAR_SCRIPT} && \ - echo "Cross-compilation environment set up for ${TARGET}"; \ - else \ - echo "No cross-compilation needed"; \ - fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ - cargo build ${TARGET_FLAG} --release --bin commit-boost-signer ${FEATURE_OPENSSL_VENDORED} && \ - if [ ! 
-z "$TARGET" ]; then \ - # If we're cross-compiling, we need to move the binary out of the target dir - mv target/${TARGET}/release/commit-boost-signer target/release/commit-boost-signer; \ - fi - -# Assemble the runner image -FROM debian:bookworm-slim AS runtime -WORKDIR /app - +FROM debian:bookworm-slim +ARG BINARIES_PATH TARGETOS TARGETARCH +COPY ${BINARIES_PATH}/commit-boost-signer-${TARGETOS}-${TARGETARCH} /usr/local/bin RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ libssl3 \ libssl-dev \ - curl \ - && apt-get clean autoclean \ - && rm -rf /var/lib/apt/lists/* - -COPY --from=builder /app/target/release/commit-boost-signer /usr/local/bin + curl && \ + # Cleanup + apt-get clean autoclean && \ + rm -rf /var/lib/apt/lists/* +# Create a non-root user to run the application RUN groupadd -g 10001 commitboost && \ useradd -u 10001 -g commitboost -s /sbin/nologin commitboost USER commitboost From 12c020a20af91f673e348b12f2bc561fe57a6ae4 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 13 May 2025 02:53:02 -0400 Subject: [PATCH 11/52] Fixed the Docker image binary filenames --- provisioning/pbs.Dockerfile | 2 +- provisioning/signer.Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/provisioning/pbs.Dockerfile b/provisioning/pbs.Dockerfile index 9eff5890..9eb72702 100644 --- a/provisioning/pbs.Dockerfile +++ b/provisioning/pbs.Dockerfile @@ -1,6 +1,6 @@ FROM debian:bookworm-slim ARG BINARIES_PATH TARGETOS TARGETARCH -COPY ${BINARIES_PATH}/commit-boost-pbs-${TARGETOS}-${TARGETARCH} /usr/local/bin +COPY ${BINARIES_PATH}/commit-boost-pbs-${TARGETOS}-${TARGETARCH} /usr/local/bin/commit-boost-pbs RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index f9824e7a..05679762 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,6 +1,6 @@ FROM debian:bookworm-slim ARG BINARIES_PATH TARGETOS TARGETARCH -COPY ${BINARIES_PATH}/commit-boost-signer-${TARGETOS}-${TARGETARCH} /usr/local/bin +COPY ${BINARIES_PATH}/commit-boost-signer-${TARGETOS}-${TARGETARCH} /usr/local/bin/commit-boost-signer RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ From 53cafc039a747e61a92b9fb41a9a53a395f1a1a0 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 13 May 2025 17:42:28 -0400 Subject: [PATCH 12/52] Cleaned up the Darwin artifact step --- .github/workflows/release.yml | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 40745fbb..5be42110 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -98,9 +98,11 @@ jobs: - commit-boost-signer include: # - target: x86_64-apple-darwin - # os: macos-latest-large + # os: macos-latest-large + # package-suffix: x86-64 - target: aarch64-apple-darwin os: macos-latest + package-suffix: arm64 runs-on: ${{ matrix.os }} steps: - name: Checkout code @@ -145,18 +147,18 @@ jobs: - name: Build binary (Darwin) run: cargo build --release --target ${{ matrix.target }} --bin ${{ matrix.name }} - - name: Package binary (Unix) + - name: Package binary (Darwin) run: | cd target/${{ matrix.target }}/release - tar -czvf ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz ${{ matrix.name }} - mv ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz ../../../ + tar -czvf ${{ matrix.name }}-${{ github.ref_name }}-darwin_${{ 
matrix.package-suffix }}.tar.gz ${{ matrix.name }} + mv ${{ matrix.name }}-${{ github.ref_name }}-darwin_${{ matrix.package-suffix }}.tar.gz ../../../ - name: Upload artifact uses: actions/upload-artifact@v4 with: - name: ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }} + name: ${{ matrix.name }}-${{ github.ref_name }}-darwin_${{ matrix.package-suffix }} path: | - ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz + ${{ matrix.name }}-${{ github.ref_name }}-darwin_${{ matrix.package-suffix }}.tar.gz # Builds the PBS Docker image build-and-push-pbs-docker: @@ -208,8 +210,6 @@ jobs: tags: | ghcr.io/commit-boost/pbs:${{ github.ref_name }} ${{ !contains(github.ref_name, 'rc') && 'ghcr.io/commit-boost/pbs:latest' || '' }} - cache-from: type=registry,ref=ghcr.io/commit-boost/pbs:buildcache - cache-to: type=registry,ref=ghcr.io/commit-boost/pbs:buildcache,mode=max file: provisioning/pbs.Dockerfile # Builds the Signer Docker image @@ -262,8 +262,6 @@ jobs: tags: | ghcr.io/commit-boost/signer:${{ github.ref_name }} ${{ !contains(github.ref_name, 'rc') && 'ghcr.io/commit-boost/signer:latest' || '' }} - cache-from: type=registry,ref=ghcr.io/commit-boost/signer:buildcache - cache-to: type=registry,ref=ghcr.io/commit-boost/signer:buildcache,mode=max file: provisioning/signer.Dockerfile # Creates a draft release on GitHub with the binaries From 58c61174c138f61a775031c2c28b00dac5038c64 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 14 May 2025 00:24:35 -0400 Subject: [PATCH 13/52] Made the CI workflow and justfile use the same toolchain as the source --- .github/workflows/ci.yml | 4 ++-- justfile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 894d13da..ae9bad89 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,11 +26,11 @@ jobs: - name: Install Rust toolchain uses: dtolnay/rust-toolchain@master with: - toolchain: nightly-2025-02-26 + toolchain: 1.83 components: clippy, rustfmt - name: Install protoc - run: sudo apt-get install protobuf-compiler + run: sudo provisioning/protoc.sh - name: Setup just uses: extractions/setup-just@v2 diff --git a/justfile b/justfile index e6d11f62..b9250870 100644 --- a/justfile +++ b/justfile @@ -1,5 +1,5 @@ -# Makes sure the nightly-2025-02-26 toolchain is installed -toolchain := "nightly-2025-02-26" +# Makes sure the same toolchain as the source is installed +toolchain := 1.83 fmt: rustup toolchain install {{toolchain}} > /dev/null 2>&1 && \ From 45e581baabbed9ba7987c3260b286a877a22480b Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 14 May 2025 01:06:05 -0400 Subject: [PATCH 14/52] Revert "Made the CI workflow and justfile use the same toolchain as the source" This reverts commit 58c61174c138f61a775031c2c28b00dac5038c64. 
--- .github/workflows/ci.yml | 4 ++-- justfile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ae9bad89..894d13da 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,11 +26,11 @@ jobs: - name: Install Rust toolchain uses: dtolnay/rust-toolchain@master with: - toolchain: 1.83 + toolchain: nightly-2025-02-26 components: clippy, rustfmt - name: Install protoc - run: sudo provisioning/protoc.sh + run: sudo apt-get install protobuf-compiler - name: Setup just uses: extractions/setup-just@v2 diff --git a/justfile b/justfile index b9250870..e6d11f62 100644 --- a/justfile +++ b/justfile @@ -1,5 +1,5 @@ -# Makes sure the same toolchain as the source is installed -toolchain := 1.83 +# Makes sure the nightly-2025-02-26 toolchain is installed +toolchain := "nightly-2025-02-26" fmt: rustup toolchain install {{toolchain}} > /dev/null 2>&1 && \ From 24a10c55f3bd558ad976852255b351bed31ef641 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 14 May 2025 02:26:15 -0400 Subject: [PATCH 15/52] Testing removal of OpenSSL vendored option --- Cargo.lock | 12 -------- Cargo.toml | 3 -- crates/common/Cargo.toml | 4 --- provisioning/build.Dockerfile | 56 +++++++++++------------------------ 4 files changed, 17 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 436d3b65..5ebc811a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1494,8 +1494,6 @@ dependencies = [ "ethereum_ssz_derive", "eyre", "jsonwebtoken", - "k256", - "openssl", "pbkdf2 0.12.2", "rand 0.9.0", "reqwest", @@ -3552,15 +3550,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" -[[package]] -name = "openssl-src" -version = "300.5.0+3.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ce546f549326b0e6052b649198487d91320875da901e7bd11a06d1ee3f9c2f" -dependencies = [ - "cc", -] - [[package]] name = "openssl-sys" version = "0.9.106" @@ -3569,7 +3558,6 @@ checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd" dependencies = [ "cc", "libc", - "openssl-src", "pkg-config", "vcpkg", ] diff --git a/Cargo.toml b/Cargo.toml index 14cddf82..aef26a94 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,9 +7,6 @@ edition = "2021" rust-version = "1.83" version = "0.7.0-rc.2" -[workspace.features] -openssl-vendored = ["crates/common/openssl-vendored"] - [workspace.dependencies] aes = "0.8" alloy = { version = "0.12", features = [ diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 15c0b8d1..df78b046 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -41,7 +41,3 @@ tree_hash_derive.workspace = true unicode-normalization.workspace = true url.workspace = true jsonwebtoken.workspace = true -openssl = { version = "0.10", optional = true, features = ["vendored"] } - -[features] -openssl-vendored = ["openssl/vendored"] diff --git a/provisioning/build.Dockerfile b/provisioning/build.Dockerfile index a4eb3723..34ad27a5 100644 --- a/provisioning/build.Dockerfile +++ b/provisioning/build.Dockerfile @@ -1,15 +1,15 @@ # This will be the main build image FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE WORKDIR /app FROM --platform=${BUILDPLATFORM} chef AS planner -ARG TARGETOS TARGETARCH BUILDPLATFORM 
OPENSSL_VENDORED TARGET_CRATE +ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE COPY . . RUN cargo chef prepare --recipe-path recipe.json FROM --platform=${BUILDPLATFORM} chef AS builder -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE RUN test -n "$TARGET_CRATE" || (echo "TARGET_CRATE must be set to the service / binary you want to build" && false) ENV BUILD_VAR_SCRIPT=/tmp/env.sh COPY --from=planner /app/recipe.json recipe.json @@ -18,43 +18,33 @@ COPY --from=planner /app/recipe.json recipe.json RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # We're on x64, cross-compiling for arm64 rustup target add aarch64-unknown-linux-gnu && \ + dpkg --add-architecture arm64 && \ apt update && \ - apt install -y gcc-aarch64-linux-gnu && \ + apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture arm64 && \ - apt update && \ - apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ # We're on arm64, cross-compiling for x64 rustup target add x86_64-unknown-linux-gnu && \ + dpkg --add-architecture amd64 && \ apt update && \ - apt install -y gcc-x86-64-linux-gnu && \ + apt install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 && \ echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture amd64 && \ - apt update && \ - apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> 
${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ fi # Run cook to prep the build @@ -64,14 +54,8 @@ RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ else \ echo "No cross-compilation needed"; \ fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ export GIT_HASH=$(git rev-parse HEAD) && \ - cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json # Get the latest Protoc since the one in the Debian repo is incredibly old COPY provisioning/protoc.sh provisioning/protoc.sh @@ -88,14 +72,8 @@ RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ else \ echo "No cross-compilation needed"; \ fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ export GIT_HASH=$(git rev-parse HEAD) && \ - cargo build ${TARGET_FLAG} --release --bin ${TARGET_CRATE} ${FEATURE_OPENSSL_VENDORED} && \ + cargo build ${TARGET_FLAG} --release --bin ${TARGET_CRATE} && \ if [ ! -z "$TARGET" ]; then \ # If we're cross-compiling, we need to move the binary out of the target dir mv target/${TARGET}/release/${TARGET_CRATE} target/release/${TARGET_CRATE}; \ From e36da545b00929146efbfa60eac1df0efb512d5e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 14 May 2025 02:59:32 -0400 Subject: [PATCH 16/52] Updating just in the CI workflow --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 894d13da..0b15367f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,12 +30,12 @@ jobs: components: clippy, rustfmt - name: Install protoc - run: sudo apt-get install protobuf-compiler + run: sudo provisioning/protoc.sh - name: Setup just - uses: extractions/setup-just@v2 + uses: extractions/setup-just@v3 with: - just-version: 1.5.0 + just-version: 1.40.0 - name: Check compilation run: cargo check From e7c6d193b15232dfa51e09f61c075e3c9941a18d Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 21 May 2025 02:56:00 -0400 Subject: [PATCH 17/52] Refactored the signer to support host and port config settings --- crates/cli/src/docker_init.rs | 34 +++++++++++++++++------ crates/common/src/config/constants.rs | 2 +- crates/common/src/config/signer.rs | 37 +++++++++++++++++++------ crates/common/src/signer/constants.rs | 1 + crates/common/src/signer/mod.rs | 2 ++ crates/signer/src/service.rs | 7 ++--- docs/docs/get_started/configuration.md | 8 ++++++ docs/docs/get_started/running/binary.md | 4 +-- 8 files changed, 72 insertions(+), 23 deletions(-) create mode 100644 crates/common/src/signer/constants.rs diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 4453f597..652e3448 100644 --- 
a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -14,11 +14,11 @@ use cb_common::{ PBS_ENDPOINT_ENV, PBS_MODULE_NAME, PROXY_DIR_DEFAULT, PROXY_DIR_ENV, PROXY_DIR_KEYS_DEFAULT, PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_DEFAULT, PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, - SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, SIGNER_JWT_SECRET_ENV, SIGNER_KEYS_ENV, - SIGNER_MODULE_NAME, SIGNER_PORT_ENV, SIGNER_URL_ENV, + SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, SIGNER_ENDPOINT_ENV, + SIGNER_JWT_SECRET_ENV, SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, SIGNER_URL_ENV, }, pbs::{BUILDER_API_PATH, GET_STATUS_PATH}, - signer::{ProxyStore, SignerLoader}, + signer::{ProxyStore, SignerLoader, DEFAULT_SIGNER_PORT}, types::ModuleId, utils::random_jwt_secret, }; @@ -73,7 +73,11 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut targets = Vec::new(); // address for signer API communication - let signer_port = 20000; + let signer_port = if let Some(signer_config) = &cb_config.signer { + signer_config.port + } else { + DEFAULT_SIGNER_PORT + }; let signer_server = if let Some(SignerConfig { inner: SignerType::Remote { url }, .. }) = &cb_config.signer { url.to_string() @@ -334,10 +338,17 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut signer_envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), get_env_same(JWTS_ENV), - get_env_uval(SIGNER_PORT_ENV, signer_port as u64), ]); - let mut ports = vec![]; + // Bind the signer API to 0.0.0.0 + let container_endpoint = + SocketAddr::from((Ipv4Addr::UNSPECIFIED, signer_config.port)); + let (key, val) = get_env_val(SIGNER_ENDPOINT_ENV, &container_endpoint.to_string()); + signer_envs.insert(key, val); + + let host_endpoint = SocketAddr::from((signer_config.host, signer_config.port)); + let mut ports = vec![format!("{}:{}", host_endpoint, signer_config.port)]; + warnings.push(format!("cb_signer has an exported port on {}", signer_config.port)); if let Some((key, val)) = chain_spec_env.clone() { signer_envs.insert(key, val); @@ -459,13 +470,20 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut signer_envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), get_env_same(JWTS_ENV), - get_env_uval(SIGNER_PORT_ENV, signer_port as u64), get_env_val(DIRK_CERT_ENV, DIRK_CERT_DEFAULT), get_env_val(DIRK_KEY_ENV, DIRK_KEY_DEFAULT), get_env_val(DIRK_DIR_SECRETS_ENV, DIRK_DIR_SECRETS_DEFAULT), ]); - let mut ports = vec![]; + // Bind the signer API to 0.0.0.0 + let container_endpoint = + SocketAddr::from((Ipv4Addr::UNSPECIFIED, signer_config.port)); + let (key, val) = get_env_val(SIGNER_ENDPOINT_ENV, &container_endpoint.to_string()); + signer_envs.insert(key, val); + + let host_endpoint = SocketAddr::from((signer_config.host, signer_config.port)); + let mut ports = vec![format!("{}:{}", host_endpoint, signer_config.port)]; + warnings.push(format!("cb_signer has an exported port on {}", signer_config.port)); if let Some((key, val)) = chain_spec_env.clone() { signer_envs.insert(key, val); diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index 422af7e7..d7799146 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -33,7 +33,7 @@ pub const SIGNER_IMAGE_DEFAULT: &str = "ghcr.io/commit-boost/signer:latest"; pub const SIGNER_MODULE_NAME: &str = "signer"; /// Where the signer module should open the 
server
-pub const SIGNER_PORT_ENV: &str = "CB_SIGNER_PORT";
+pub const SIGNER_ENDPOINT_ENV: &str = "CB_SIGNER_ENDPOINT";
 
 /// Comma separated list module_id=jwt_secret
 pub const JWTS_ENV: &str = "CB_JWTS";
diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs
index 9df6b948..dce97666 100644
--- a/crates/common/src/config/signer.rs
+++ b/crates/common/src/config/signer.rs
@@ -1,4 +1,8 @@
-use std::{collections::HashMap, path::PathBuf};
+use std::{
+    collections::HashMap,
+    net::{Ipv4Addr, SocketAddr},
+    path::PathBuf,
+};
 
 use eyre::{bail, OptionExt, Result};
 use serde::{Deserialize, Serialize};
@@ -6,18 +10,25 @@ use tonic::transport::{Certificate, Identity};
 use url::Url;
 
 use super::{
-    constants::SIGNER_IMAGE_DEFAULT, load_jwt_secrets, utils::load_env_var, CommitBoostConfig,
-    SIGNER_PORT_ENV,
+    load_jwt_secrets, load_optional_env_var, utils::load_env_var, CommitBoostConfig,
+    SIGNER_ENDPOINT_ENV, SIGNER_IMAGE_DEFAULT,
 };
 use crate::{
     config::{DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, DIRK_KEY_ENV},
-    signer::{ProxyStore, SignerLoader},
+    signer::{ProxyStore, SignerLoader, DEFAULT_SIGNER_PORT},
     types::{Chain, ModuleId},
+    utils::{default_host, default_u16},
 };
 
 #[derive(Debug, Serialize, Deserialize, Clone)]
 #[serde(rename_all = "snake_case")]
 pub struct SignerConfig {
+    /// Host address to listen for signer API calls on
+    #[serde(default = "default_host")]
+    pub host: Ipv4Addr,
+    /// Port to listen for signer API calls on
+    #[serde(default = "default_u16::<DEFAULT_SIGNER_PORT>")]
+    pub port: u16,
     /// Docker image of the module
     #[serde(default = "default_signer")]
     pub docker_image: String,
@@ -87,7 +98,7 @@ pub struct StartSignerConfig {
     pub chain: Chain,
     pub loader: Option<SignerLoader>,
     pub store: Option<ProxyStore>,
-    pub server_port: u16,
+    pub endpoint: SocketAddr,
     pub jwts: HashMap<ModuleId, String>,
     pub dirk: Option<DirkConfig>,
 }
@@ -97,7 +108,17 @@ impl StartSignerConfig {
         let config = CommitBoostConfig::from_env_path()?;
 
         let jwts = load_jwt_secrets()?;
-        let server_port = load_env_var(SIGNER_PORT_ENV)?.parse()?;
+
+        // Load the server endpoint first from the env var, then the config, and finally
+        // the defaults
+        let endpoint = if let Some(endpoint) = load_optional_env_var(SIGNER_ENDPOINT_ENV) {
+            endpoint.parse()?
+        } else {
+            match config.signer {
+                Some(ref signer) => SocketAddr::from((signer.host, signer.port)),
+                None => SocketAddr::from((default_host(), DEFAULT_SIGNER_PORT)),
+            }
+        };
 
         let signer = config.signer.ok_or_eyre("Signer config is missing")?.inner;
 
         match signer {
             SignerType::Local { loader, store, ..
} => Ok(StartSignerConfig { chain: config.chain, loader: Some(loader), - server_port, + endpoint, jwts, store, dirk: None, @@ -133,7 +154,7 @@ impl StartSignerConfig { Ok(StartSignerConfig { chain: config.chain, - server_port, + endpoint, jwts, loader: None, store, diff --git a/crates/common/src/signer/constants.rs b/crates/common/src/signer/constants.rs new file mode 100644 index 00000000..aa834f91 --- /dev/null +++ b/crates/common/src/signer/constants.rs @@ -0,0 +1 @@ +pub const DEFAULT_SIGNER_PORT: u16 = 20000; diff --git a/crates/common/src/signer/mod.rs b/crates/common/src/signer/mod.rs index e0a164a7..b6dce29d 100644 --- a/crates/common/src/signer/mod.rs +++ b/crates/common/src/signer/mod.rs @@ -1,8 +1,10 @@ +mod constants; mod loader; mod schemes; mod store; mod types; +pub use constants::*; pub use loader::*; pub use schemes::*; pub use store::*; diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 28a1d934..a965f057 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, net::SocketAddr, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; use axum::{ extract::{Request, State}, @@ -67,7 +67,7 @@ impl SigningService { let loaded_consensus = state.manager.read().await.available_consensus_signers(); let loaded_proxies = state.manager.read().await.available_proxy_signers(); - info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, modules =? module_ids, port =? config.server_port, loaded_consensus, loaded_proxies, "Starting signing service"); + info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, modules =? module_ids, endpoint =? config.endpoint, loaded_consensus, loaded_proxies, "Starting signing service"); SigningService::init_metrics(config.chain)?; @@ -81,8 +81,7 @@ impl SigningService { .route_layer(middleware::from_fn(log_request)) .route(STATUS_PATH, get(handle_status)); - let address = SocketAddr::from(([0, 0, 0, 0], config.server_port)); - let listener = TcpListener::bind(address).await?; + let listener = TcpListener::bind(config.endpoint).await?; axum::serve(listener, app).await.wrap_err("signer server exited") } diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 4e642205..5d196619 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -65,6 +65,8 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml [signer] + port = 20000 + [signer.local.loader] format = "lighthouse" keys_path = "keys" @@ -111,6 +113,8 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml [signer] + port = 20000 + [signer.local.loader] format = "teku" keys_path = "keys" @@ -133,6 +137,8 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml [signer] + port = 20000 + [signer.local.loader] format = "lodestar" keys_path = "keys" @@ -299,6 +305,8 @@ port = 18550 url = "" [signer] +port = 20000 + [signer.loader] format = "lighthouse" keys_path = "/path/to/keys" diff --git a/docs/docs/get_started/running/binary.md b/docs/docs/get_started/running/binary.md index 3708ab19..ea5138c6 100644 --- a/docs/docs/get_started/running/binary.md +++ b/docs/docs/get_started/running/binary.md @@ -22,12 +22,12 @@ Modules need some environment variables to work correctly. 
### PBS Module - `CB_BUILDER_URLS`: optional, comma-separated list of urls to `events` modules where to post builder events. -- `CB_PBS_ENDPOINT`: optional, override the endpoint where the PBS module will open the port for the beacon node. +- `CB_PBS_ENDPOINT`: optional, override to specify the `IP:port` endpoint where the PBS module will open the port for the beacon node. - `CB_MUX_PATH_{ID}`: optional, override where to load mux validator keys for mux with `id=\{ID\}`. ### Signer Module - `CB_SIGNER_JWT_SECRET`: secret to use for JWT authentication with the Signer module. -- `CB_SIGNER_PORT`: required, port to open the signer server on. +- `CB_SIGNER_ENDPOINT`: optional, override to specify the `IP:port` endpoint to bind the signer server to. - For loading keys we currently support: - `CB_SIGNER_LOADER_FILE`: path to a `.json` with plaintext keys (for testing purposes only). - `CB_SIGNER_LOADER_FORMAT`, `CB_SIGNER_LOADER_KEYS_DIR` and `CB_SIGNER_LOADER_SECRETS_DIR`: paths to the `keys` and `secrets` directories or files (ERC-2335 style keystores, see [Signer config](../configuration/#signer-module) for more info). From 6117219d62f6243d263fdbabddc5bb387bfd2857 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 21 May 2025 12:24:02 -0400 Subject: [PATCH 18/52] Updated docs --- docs/docs/get_started/building.md | 1 - docs/docs/get_started/configuration.md | 25 +++++++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md index d38b447f..f831de57 100644 --- a/docs/docs/get_started/building.md +++ b/docs/docs/get_started/building.md @@ -166,7 +166,6 @@ This will create a binary in `./target/release/commit-boost-signer`. To verify i The signer needs the following environment variables set: - `CB_CONFIG` = path of your config file. - `CB_JWTS` = a dummy key-value pair of [JWT](https://en.wikipedia.org/wiki/JSON_Web_Token) values for various services. Since we don't need them for the sake of just testing the binary, we can use something like `"test_jwts=dummy"`. -- `CB_SIGNER_PORT` = the network port to listen for signer requests on. Default is `20000`. Set these values, create the `keys` and `secrets` directories listed in the configuration file, and run the binary: diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 5d196619..efe9da3f 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -39,6 +39,13 @@ Commit-Boost supports both local and remote signers. The signer module is respon To start a local signer module, you need to include its parameters in the config file ```toml +[pbs] +... +with_signer = true + +[signer] +port = 20000 + [signer.local.loader] format = "lighthouse" keys_path = "/path/to/keys" @@ -64,6 +71,10 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml + [pbs] + ... + with_signer = true + [signer] port = 20000 @@ -89,7 +100,13 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml + [pbs] + ... + with_signer = true + [signer] + port = 20000 + [signer.local.loader] format = "prysm" keys_path = "wallet/direct/accounts/all-accounts.keystore.json" @@ -112,6 +129,10 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml + [pbs] + ... 
+ with_signer = true + [signer] port = 20000 @@ -136,6 +157,10 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml + [pbs] + ... + with_signer = true + [signer] port = 20000 From c0f591d5656aed3f2b705583bcd95d88abe45394 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 21 May 2025 12:41:09 -0400 Subject: [PATCH 19/52] Fixing Clippy in CI workflow --- .github/workflows/ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0b15367f..3be3a7da 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,6 +28,9 @@ jobs: with: toolchain: nightly-2025-02-26 components: clippy, rustfmt + + - name: Install Clippy on prod toolchain + run: rustup component add --toolchain 1.83.0-x86_64-unknown-linux-gnu clippy - name: Install protoc run: sudo provisioning/protoc.sh From adbd34a02d52a86251258cc82be5f1ebf47474fe Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 01:07:51 -0400 Subject: [PATCH 20/52] Removed obviated CI setup --- .github/workflows/ci.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3be3a7da..0b15367f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,9 +28,6 @@ jobs: with: toolchain: nightly-2025-02-26 components: clippy, rustfmt - - - name: Install Clippy on prod toolchain - run: rustup component add --toolchain 1.83.0-x86_64-unknown-linux-gnu clippy - name: Install protoc run: sudo provisioning/protoc.sh From e3488b34f8629fe65071688165c17579d4b9fd23 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 20 May 2025 15:40:27 -0400 Subject: [PATCH 21/52] Minor dedup of RwLock guard acquisition --- crates/signer/src/service.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index a965f057..cce8038e 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -64,8 +64,14 @@ impl SigningService { jwts: config.jwts.into(), }; - let loaded_consensus = state.manager.read().await.available_consensus_signers(); - let loaded_proxies = state.manager.read().await.available_proxy_signers(); + // Get the signer counts + let loaded_consensus: usize; + let loaded_proxies: usize; + { + let manager = state.manager.read().await; + loaded_consensus = manager.available_consensus_signers(); + loaded_proxies = manager.available_proxy_signers(); + } info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, modules =? module_ids, endpoint =? 
config.endpoint, loaded_consensus, loaded_proxies, "Starting signing service");

From c3d7ec40f92a4dc2c4481afd517d81ebf9e9b7cc Mon Sep 17 00:00:00 2001
From: Joe Clapis
Date: Thu, 22 May 2025 00:58:49 -0400
Subject: [PATCH 22/52] Added rate limiting for signer clients with repeated JWT auth failures

---
 crates/common/src/config/constants.rs |   5 ++
 crates/common/src/config/signer.rs    |  51 ++++++++++-
 crates/common/src/signer/constants.rs |   5 ++
 crates/common/src/utils.rs            |   4 +
 crates/signer/src/error.rs            |   6 ++
 crates/signer/src/service.rs          | 116 ++++++++++++++++++++++++--
 6 files changed, 176 insertions(+), 11 deletions(-)

diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs
index d7799146..5941a42b 100644
--- a/crates/common/src/config/constants.rs
+++ b/crates/common/src/config/constants.rs
@@ -35,6 +35,11 @@ pub const SIGNER_MODULE_NAME: &str = "signer";
 /// Where the signer module should open the server
 pub const SIGNER_ENDPOINT_ENV: &str = "CB_SIGNER_ENDPOINT";
 
+// JWT authentication settings
+pub const SIGNER_JWT_AUTH_FAIL_LIMIT_ENV: &str = "CB_SIGNER_JWT_AUTH_FAIL_LIMIT";
+pub const SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV: &str =
+    "CB_SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS";
+
 /// Comma separated list module_id=jwt_secret
 pub const JWTS_ENV: &str = "CB_JWTS";
 /// The JWT secret for the signer to validate the modules requests
diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs
index dce97666..6eb870cf 100644
--- a/crates/common/src/config/signer.rs
+++ b/crates/common/src/config/signer.rs
@@ -11,13 +11,17 @@ use url::Url;
 
 use super::{
     load_jwt_secrets, load_optional_env_var, utils::load_env_var, CommitBoostConfig,
-    SIGNER_ENDPOINT_ENV, SIGNER_IMAGE_DEFAULT,
+    SIGNER_ENDPOINT_ENV, SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_ENV,
+    SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV,
 };
 use crate::{
     config::{DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, DIRK_KEY_ENV},
-    signer::{ProxyStore, SignerLoader, DEFAULT_SIGNER_PORT},
+    signer::{
+        ProxyStore, SignerLoader, DEFAULT_JWT_AUTH_FAIL_LIMIT,
+        DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, DEFAULT_SIGNER_PORT,
+    },
     types::{Chain, ModuleId},
-    utils::{default_host, default_u16},
+    utils::{default_host, default_u16, default_u32},
 };
 
 #[derive(Debug, Serialize, Deserialize, Clone)]
@@ -26,12 +30,24 @@ pub struct SignerConfig {
     /// Host address to listen for signer API calls on
     #[serde(default = "default_host")]
     pub host: Ipv4Addr,
+
     /// Port to listen for signer API calls on
     #[serde(default = "default_u16::<DEFAULT_SIGNER_PORT>")]
     pub port: u16,
+
     /// Docker image of the module
     #[serde(default = "default_signer")]
     pub docker_image: String,
+
+    /// Number of JWT auth failures before rate limiting an endpoint
+    #[serde(default = "default_u32::<DEFAULT_JWT_AUTH_FAIL_LIMIT>")]
+    pub jwt_auth_fail_limit: u32,
+
+    /// Duration in seconds to rate limit an endpoint after the JWT auth failure
+    /// limit has been reached
+    #[serde(default = "default_u32::<DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS>")]
+    pub jwt_auth_fail_timeout_seconds: u32,
+
     /// Inner type-specific configuration
     #[serde(flatten)]
     pub inner: SignerType,
 }
@@ -100,6 +116,8 @@ pub struct StartSignerConfig {
     pub store: Option<ProxyStore>,
     pub endpoint: SocketAddr,
     pub jwts: HashMap<ModuleId, String>,
+    pub jwt_auth_fail_limit: u32,
+    pub jwt_auth_fail_timeout_seconds: u32,
     pub dirk: Option<DirkConfig>,
 }
 
@@ -120,6 +138,29 @@ impl StartSignerConfig {
             }
         };
 
+        // Load the JWT auth fail limit the same way
+        let jwt_auth_fail_limit =
+            if let Some(limit) = load_optional_env_var(SIGNER_JWT_AUTH_FAIL_LIMIT_ENV) {
+                limit.parse()?
+            } else {
+                match config.signer {
+                    Some(ref signer) => signer.jwt_auth_fail_limit,
+                    None => DEFAULT_JWT_AUTH_FAIL_LIMIT,
+                }
+            };
+
+        // Load the JWT auth fail timeout the same way
+        let jwt_auth_fail_timeout_seconds = if let Some(timeout) =
+            load_optional_env_var(SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV)
+        {
+            timeout.parse()?
+        } else {
+            match config.signer {
+                Some(ref signer) => signer.jwt_auth_fail_timeout_seconds,
+                None => DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS,
+            }
+        };
+
         let signer = config.signer.ok_or_eyre("Signer config is missing")?.inner;
 
         match signer {
@@ -128,6 +169,8 @@ impl StartSignerConfig {
                 loader: Some(loader),
                 endpoint,
                 jwts,
+                jwt_auth_fail_limit,
+                jwt_auth_fail_timeout_seconds,
                 store,
                 dirk: None,
             }),
@@ -156,6 +199,8 @@ impl StartSignerConfig {
                     chain: config.chain,
                     endpoint,
                     jwts,
+                    jwt_auth_fail_limit,
+                    jwt_auth_fail_timeout_seconds,
                     loader: None,
                     store,
                     dirk: Some(DirkConfig {
diff --git a/crates/common/src/signer/constants.rs b/crates/common/src/signer/constants.rs
index aa834f91..45e3ce23 100644
--- a/crates/common/src/signer/constants.rs
+++ b/crates/common/src/signer/constants.rs
@@ -1 +1,6 @@
 pub const DEFAULT_SIGNER_PORT: u16 = 20000;
+
+// Rate limit signer API requests for 5 minutes after the endpoint has 3 JWT
+// auth failures
+pub const DEFAULT_JWT_AUTH_FAIL_LIMIT: u32 = 3;
+pub const DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS: u32 = 5 * 60;
diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs
index 37119580..a1dcb7cb 100644
--- a/crates/common/src/utils.rs
+++ b/crates/common/src/utils.rs
@@ -137,6 +137,10 @@ pub const fn default_u64<const U: u64>() -> u64 {
     U
 }
 
+pub const fn default_u32<const U: u32>() -> u32 {
+    U
+}
+
 pub const fn default_u16<const U: u16>() -> u16 {
     U
 }
diff --git a/crates/signer/src/error.rs b/crates/signer/src/error.rs
index 477e9e42..a2a113f3 100644
--- a/crates/signer/src/error.rs
+++ b/crates/signer/src/error.rs
@@ -27,6 +27,9 @@ pub enum SignerModuleError {
 
     #[error("internal error: {0}")]
     Internal(String),
+
+    #[error("rate limited for {0} more seconds")]
+    RateLimited(f64),
 }
 
 impl IntoResponse for SignerModuleError {
@@ -45,6 +48,9 @@ impl IntoResponse for SignerModuleError {
                 (StatusCode::INTERNAL_SERVER_ERROR, "internal error".to_string())
             }
             SignerModuleError::SignerError(err) => (StatusCode::BAD_REQUEST, err.to_string()),
+            SignerModuleError::RateLimited(duration) => {
+                (StatusCode::TOO_MANY_REQUESTS, format!("rate limited for {duration:?}"))
+            }
         }
         .into_response()
     }
diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs
index cce8038e..3ca1d5ac 100644
--- a/crates/signer/src/service.rs
+++ b/crates/signer/src/service.rs
@@ -1,7 +1,12 @@
-use std::{collections::HashMap, sync::Arc};
+use std::{
+    collections::HashMap,
+    net::SocketAddr,
+    sync::Arc,
+    time::{Duration, Instant},
+};
 
 use axum::{
-    extract::{Request, State},
+    extract::{ConnectInfo, Request, State},
     http::StatusCode,
     middleware::{self, Next},
     response::{IntoResponse, Response},
@@ -41,13 +46,30 @@ use crate::{
 /// Implements the Signer API and provides a service for signing requests
 pub struct SigningService;
 
+// Tracker for a peer's JWT failures
+struct JwtAuthFailureInfo {
+    // Number of auth failures since the first failure was tracked
+    failure_count: u32,
+
+    // Time of the last auth failure
+    last_failure: Instant,
+}
+
 #[derive(Clone)]
 struct SigningState {
     /// Manager handling different signing methods
     manager: Arc<RwLock<SigningManager>>,
+
     /// Map of module ids to JWT secrets. This also acts as a registry of all
     /// modules running
     jwts: Arc<HashMap<ModuleId, String>>,
+
+    /// Map of JWT failures per peer
+    jwt_auth_failures: Arc<RwLock<HashMap<String, JwtAuthFailureInfo>>>,
+
+    // JWT auth failure settings
+    jwt_auth_fail_limit: u32,
+    jwt_auth_fail_timeout: Duration,
 }
 
 impl SigningService {
@@ -62,6 +84,9 @@ impl SigningService {
         let state = SigningState {
             manager: Arc::new(RwLock::new(start_manager(config.clone()).await?)),
             jwts: config.jwts.into(),
+            jwt_auth_failures: Arc::new(RwLock::new(HashMap::new())),
+            jwt_auth_fail_limit: config.jwt_auth_fail_limit,
+            jwt_auth_fail_timeout: Duration::from_secs(config.jwt_auth_fail_timeout_seconds as u64),
         };
 
         // Get the signer counts
@@ -73,7 +98,17 @@ impl SigningService {
             loaded_proxies = manager.available_proxy_signers();
         }
 
-        info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, modules =? module_ids, endpoint =? config.endpoint, loaded_consensus, loaded_proxies, "Starting signing service");
+        info!(
+            version = COMMIT_BOOST_VERSION,
+            commit_hash = COMMIT_BOOST_COMMIT,
+            modules =? module_ids,
+            endpoint =? config.endpoint,
+            loaded_consensus,
+            loaded_proxies,
+            jwt_auth_fail_limit =? state.jwt_auth_fail_limit,
+            jwt_auth_fail_timeout =? state.jwt_auth_fail_timeout,
+            "Starting signing service"
+        );
 
         SigningService::init_metrics(config.chain)?;
 
@@ -85,7 +120,8 @@ impl SigningService {
             .route(RELOAD_PATH, post(handle_reload))
             .with_state(state.clone())
             .route_layer(middleware::from_fn(log_request))
-            .route(STATUS_PATH, get(handle_status));
+            .route(STATUS_PATH, get(handle_status))
+            .into_make_service_with_connect_info::<SocketAddr>();
 
         let listener = TcpListener::bind(config.endpoint).await?;
 
@@ -101,9 +137,76 @@ impl SigningService {
 async fn jwt_auth(
     State(state): State<SigningState>,
     TypedHeader(auth): TypedHeader<Authorization<Bearer>>,
+    addr: ConnectInfo<SocketAddr>,
     mut req: Request,
     next: Next,
 ) -> Result<Response, SignerModuleError> {
+    // Check if the request needs to be rate limited
+    let client_ip = addr.ip().to_string();
+    check_jwt_rate_limit(&state, &client_ip).await?;
+
+    // Process JWT authorization
+    match check_jwt_auth(&auth, &state).await {
+        Ok(module_id) => {
+            req.extensions_mut().insert(module_id);
+            Ok(next.run(req).await)
+        }
+        Err(SignerModuleError::Unauthorized) => {
+            let mut failures = state.jwt_auth_failures.write().await;
+            let failure_info = failures
+                .entry(client_ip)
+                .or_insert(JwtAuthFailureInfo { failure_count: 0, last_failure: Instant::now() });
+            failure_info.failure_count += 1;
+            failure_info.last_failure = Instant::now();
+            Err(SignerModuleError::Unauthorized)
+        }
+        Err(err) => Err(err),
+    }
+}
+
+/// Checks if the incoming request needs to be rate limited due to previous JWT
+/// authentication failures
+async fn check_jwt_rate_limit(
+    state: &SigningState,
+    client_ip: &String,
+) -> Result<(), SignerModuleError> {
+    let mut failures = state.jwt_auth_failures.write().await;
+
+    // Ignore clients that don't have any failures
+    if let Some(failure_info) = failures.get(client_ip) {
+        // If the last failure was more than the timeout ago, remove this entry so it's
+        // eligible again
+        let elapsed = failure_info.last_failure.elapsed();
+        if elapsed > state.jwt_auth_fail_timeout {
+            debug!("Removing {client_ip} from JWT auth failure list");
+            failures.remove(client_ip);
+            return Ok(());
+        }
+
+        // If the failure threshold hasn't been met yet, don't rate limit
+        if failure_info.failure_count < state.jwt_auth_fail_limit {
+            debug!(
+                "Client {client_ip} has {}/{} JWT auth failures, no rate limit applied",
+                failure_info.failure_count, state.jwt_auth_fail_limit
+            );
+            return Ok(());
+        }
+
+        // Rate limit the request
+        let remaining = state.jwt_auth_fail_timeout - elapsed;
+        warn!("Client {client_ip} is rate limited for {remaining:?} more seconds due to JWT auth failures");
+        return Err(SignerModuleError::RateLimited(remaining.as_secs_f64()));
+    }
+
+    debug!("Client {client_ip} has no JWT auth failures, no rate limit applied");
+    Ok(())
+}
+
+/// Checks if a request can successfully authenticate with the JWT secret
+async fn check_jwt_auth(
+    auth: &Authorization<Bearer>,
+    state: &SigningState,
+) -> Result<ModuleId, SignerModuleError> {
     let jwt: Jwt = auth.token().to_string().into();
 
     // We first need to decode it to get the module id and then validate it
@@ -122,10 +225,7 @@ async fn jwt_auth(
         error!("Unauthorized request. Invalid JWT: {e}");
         SignerModuleError::Unauthorized
     })?;
-
-    req.extensions_mut().insert(module_id);
-
-    Ok(next.run(req).await)
+    Ok(module_id)
 }
 
 /// Requests logging middleware layer

From 9ddad6426a1fcdeb441adb259b6b2408729f1937 Mon Sep 17 00:00:00 2001
From: Joe Clapis
Date: Thu, 22 May 2025 02:06:21 -0400
Subject: [PATCH 23/52] Added Signer config validation

---
 Cargo.lock                         | 11 +++++++++++
 Cargo.toml                         |  1 +
 crates/common/Cargo.toml           |  1 +
 crates/common/src/config/mod.rs    |  3 +++
 crates/common/src/config/signer.rs | 21 ++++++++++++++++++++-
 tests/tests/config.rs              | 12 ++++++------
 6 files changed, 42 insertions(+), 7 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 5ebc811a..b80a4542 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1488,6 +1488,7 @@ dependencies = [
 "cipher 0.4.4",
 "ctr 0.9.2",
 "derive_more 2.0.1",
+ "docker-image",
 "eth2_keystore",
 "ethereum_serde_utils",
 "ethereum_ssz 0.8.3",
@@ -2158,6 +2159,16 @@ dependencies = [
 "serde_yaml",
 ]
 
+[[package]]
+name = "docker-image"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f40ed901b8f2157bafce6e96f39217f7b1a4af32d84266d251ed7c22ce001f0b"
+dependencies = [
+ "lazy_static",
+ "regex",
+]
+
 [[package]]
 name = "doctest-file"
 version = "1.0.0"
diff --git a/Cargo.toml b/Cargo.toml
index aef26a94..b02ad0da 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -36,6 +36,7 @@ color-eyre = "0.6.3"
 ctr = "0.9.2"
 derive_more = { version = "2.0.1", features = ["deref", "display", "from", "into"] }
 docker-compose-types = "0.16.0"
+docker-image = "0.2.1"
 eth2_keystore = { git = "https://github.com/sigp/lighthouse", rev = "8d058e4040b765a96aa4968f4167af7571292be2" }
 ethereum_serde_utils = "0.7.0"
 ethereum_ssz = "0.8"
diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml
index df78b046..c3955d4a 100644
--- a/crates/common/Cargo.toml
+++ b/crates/common/Cargo.toml
@@ -16,6 +16,7 @@ blst.workspace = true
 cipher.workspace = true
 ctr.workspace = true
 derive_more.workspace = true
+docker-image.workspace = true
 eth2_keystore.workspace = true
 ethereum_serde_utils.workspace = true
 ethereum_ssz.workspace = true
diff --git a/crates/common/src/config/mod.rs b/crates/common/src/config/mod.rs
index 75fd3c9d..b782999b 100644
--- a/crates/common/src/config/mod.rs
+++ b/crates/common/src/config/mod.rs
@@ -41,6 +41,9 @@ impl CommitBoostConfig {
     /// Validate config
     pub async fn validate(&self) -> Result<()> {
         self.pbs.pbs_config.validate(self.chain).await?;
+        if let Some(signer) = &self.signer {
+            signer.validate().await?;
+        }
         Ok(())
     }
 
diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs
index 6eb870cf..01b50cde 100644
--- a/crates/common/src/config/signer.rs
+++ b/crates/common/src/config/signer.rs
@@ -4,7 +4,8 @@ use std::{
     path::PathBuf,
 };
 
-use eyre::{bail, OptionExt, Result};
+use 
docker_image::DockerImage; +use eyre::{bail, ensure, OptionExt, Result}; use serde::{Deserialize, Serialize}; use tonic::transport::{Certificate, Identity}; use url::Url; @@ -40,6 +41,7 @@ pub struct SignerConfig { pub docker_image: String, /// Number of JWT auth failures before rate limiting an endpoint + /// If set to 0, no rate limiting will be applied #[serde(default = "default_u32::")] pub jwt_auth_fail_limit: u32, @@ -53,6 +55,23 @@ pub struct SignerConfig { pub inner: SignerType, } +impl SignerConfig { + /// Validate the signer config + pub async fn validate(&self) -> Result<()> { + // Port must be positive + ensure!(self.port > 0, "Port must be positive"); + + // The Docker tag must parse + ensure!(!self.docker_image.is_empty(), "Docker image is empty"); + ensure!( + DockerImage::parse(&self.docker_image).is_ok(), + format!("Invalid Docker image: {}", self.docker_image) + ); + + Ok(()) + } +} + fn default_signer() -> String { SIGNER_IMAGE_DEFAULT.to_string() } diff --git a/tests/tests/config.rs b/tests/tests/config.rs index dafd96d9..f6f31d96 100644 --- a/tests/tests/config.rs +++ b/tests/tests/config.rs @@ -37,11 +37,11 @@ async fn test_load_pbs_happy() -> Result<()> { // Docker and general settings assert_eq!(config.pbs.docker_image, "ghcr.io/commit-boost/pbs:latest"); - assert_eq!(config.pbs.with_signer, false); + assert!(!config.pbs.with_signer); assert_eq!(config.pbs.pbs_config.host, "127.0.0.1".parse::().unwrap()); assert_eq!(config.pbs.pbs_config.port, 18550); - assert_eq!(config.pbs.pbs_config.relay_check, true); - assert_eq!(config.pbs.pbs_config.wait_all_registrations, true); + assert!(config.pbs.pbs_config.relay_check); + assert!(config.pbs.pbs_config.wait_all_registrations); // Timeouts assert_eq!(config.pbs.pbs_config.timeout_get_header_ms, 950); @@ -49,12 +49,12 @@ async fn test_load_pbs_happy() -> Result<()> { assert_eq!(config.pbs.pbs_config.timeout_register_validator_ms, 3000); // Bid settings and validation - assert_eq!(config.pbs.pbs_config.skip_sigverify, false); + assert!(!config.pbs.pbs_config.skip_sigverify); dbg!(&config.pbs.pbs_config.min_bid_wei); dbg!(&U256::from(0.5)); assert_eq!(config.pbs.pbs_config.min_bid_wei, U256::from((0.5 * WEI_PER_ETH as f64) as u64)); assert_eq!(config.pbs.pbs_config.late_in_slot_time_ms, 2000); - assert_eq!(config.pbs.pbs_config.extra_validation_enabled, false); + assert!(!config.pbs.pbs_config.extra_validation_enabled); assert_eq!( config.pbs.pbs_config.rpc_url, Some("https://ethereum-holesky-rpc.publicnode.com".parse::().unwrap()) @@ -64,7 +64,7 @@ async fn test_load_pbs_happy() -> Result<()> { let relay = &config.relays[0]; assert_eq!(relay.id, Some("example-relay".to_string())); assert_eq!(relay.entry.url, "http://0xa1cec75a3f0661e99299274182938151e8433c61a19222347ea1313d839229cb4ce4e3e5aa2bdeb71c8fcf1b084963c2@abc.xyz".parse::().unwrap()); - assert_eq!(relay.enable_timing_games, false); + assert!(!relay.enable_timing_games); assert_eq!(relay.target_first_request_ms, Some(200)); assert_eq!(relay.frequency_get_header_ms, Some(300)); From c62185e13f301a3abcab32f9a28ed42f1185d7e3 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 22 May 2025 06:54:50 -0400 Subject: [PATCH 24/52] Started unit test setup for the Signer --- Cargo.lock | 6 +++-- Cargo.toml | 1 + tests/Cargo.toml | 2 ++ tests/src/utils.rs | 44 +++++++++++++++++++++++++++++-- tests/tests/pbs_get_header.rs | 2 +- tests/tests/signer_jwt_auth.rs | 47 ++++++++++++++++++++++++++++++++++ 6 files changed, 97 insertions(+), 5 deletions(-) create mode 100644 
tests/tests/signer_jwt_auth.rs diff --git a/Cargo.lock b/Cargo.lock index b80a4542..17d43e3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1590,9 +1590,11 @@ dependencies = [ "axum 0.8.1", "cb-common", "cb-pbs", + "cb-signer", "eyre", "reqwest", "serde_json", + "tempfile", "tokio", "tracing", "tracing-subscriber", @@ -4874,9 +4876,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.19.0" +version = "3.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "488960f40a3fd53d72c2a29a58722561dee8afdd175bd88e3db4677d7b2ba600" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" dependencies = [ "fastrand", "getrandom 0.3.1", diff --git a/Cargo.toml b/Cargo.toml index b02ad0da..5294508f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -57,6 +57,7 @@ serde_json = "1.0.117" serde_yaml = "0.9.33" sha2 = "0.10.8" ssz_types = "0.10" +tempfile = "3.20.0" thiserror = "2.0.12" tokio = { version = "1.37.0", features = ["full"] } toml = "0.8.13" diff --git a/tests/Cargo.toml b/tests/Cargo.toml index ce273ae7..f1b5c9d9 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -9,9 +9,11 @@ alloy.workspace = true axum.workspace = true cb-common.workspace = true cb-pbs.workspace = true +cb-signer.workspace = true eyre.workspace = true reqwest.workspace = true serde_json.workspace = true +tempfile.workspace = true tokio.workspace = true tracing.workspace = true tracing-subscriber.workspace = true diff --git a/tests/src/utils.rs b/tests/src/utils.rs index f2ae9157..e8561931 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -1,13 +1,22 @@ use std::{ + collections::HashMap, net::{Ipv4Addr, SocketAddr}, sync::{Arc, Once}, }; use alloy::{primitives::U256, rpc::types::beacon::BlsPublicKey}; use cb_common::{ - config::{PbsConfig, PbsModuleConfig, RelayConfig}, + config::{ + PbsConfig, PbsModuleConfig, RelayConfig, SignerConfig, SignerType, StartSignerConfig, + SIGNER_IMAGE_DEFAULT, + }, pbs::{RelayClient, RelayEntry}, - types::Chain, + signer::{ + SignerLoader, DEFAULT_JWT_AUTH_FAIL_LIMIT, DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, + DEFAULT_SIGNER_PORT, + }, + types::{Chain, ModuleId}, + utils::default_host, }; use eyre::Result; @@ -91,3 +100,34 @@ pub fn to_pbs_config( muxes: None, } } + +pub fn get_signer_config(loader: SignerLoader) -> SignerConfig { + SignerConfig { + host: default_host(), + port: DEFAULT_SIGNER_PORT, + docker_image: SIGNER_IMAGE_DEFAULT.to_string(), + jwt_auth_fail_limit: DEFAULT_JWT_AUTH_FAIL_LIMIT, + jwt_auth_fail_timeout_seconds: DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, + inner: SignerType::Local { loader, store: None }, + } +} + +pub fn get_start_signer_config( + signer_config: SignerConfig, + chain: Chain, + jwts: HashMap, +) -> StartSignerConfig { + match signer_config.inner { + SignerType::Local { loader, .. 
} => StartSignerConfig { + chain, + loader: Some(loader), + store: None, + endpoint: SocketAddr::new(signer_config.host.into(), signer_config.port), + jwts, + jwt_auth_fail_limit: signer_config.jwt_auth_fail_limit, + jwt_auth_fail_timeout_seconds: signer_config.jwt_auth_fail_timeout_seconds, + dirk: None, + }, + _ => panic!("Only local signers are supported in tests"), + } +} diff --git a/tests/tests/pbs_get_header.rs b/tests/tests/pbs_get_header.rs index 422a71a3..747d460c 100644 --- a/tests/tests/pbs_get_header.rs +++ b/tests/tests/pbs_get_header.rs @@ -23,7 +23,7 @@ use tree_hash::TreeHash; async fn test_get_header() -> Result<()> { setup_test_env(); let signer = random_secret(); - let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()).into(); + let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()); let chain = Chain::Holesky; let pbs_port = 3200; diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs new file mode 100644 index 00000000..989cdb61 --- /dev/null +++ b/tests/tests/signer_jwt_auth.rs @@ -0,0 +1,47 @@ +use std::{collections::HashMap, fs, time::Duration}; + +use cb_common::{ + signer::{SignerLoader, ValidatorKeysFormat}, + types::{Chain, ModuleId}, +}; +use cb_signer::service::SigningService; +use cb_tests::utils::{get_signer_config, get_start_signer_config, setup_test_env}; +use eyre::Result; +use tempfile::tempdir; + +#[tokio::test] +async fn test_signer_jwt_auth_success() -> Result<()> { + setup_test_env(); + let chain = Chain::Hoodi; + + // Mock JWT secrets + let mut jwts = HashMap::new(); + jwts.insert(ModuleId("test-module".to_string()), "test-jwt-secret".to_string()); + + // Create a temp folder and key structure + let test_folder = tempdir()?; + let test_path = test_folder.path(); + let keys_path = test_path.join("keys"); + let secrets_path = test_path.join("secrets"); + fs::create_dir_all(&keys_path)?; + fs::create_dir_all(&secrets_path)?; + + // Create a signer config + let loader = SignerLoader::ValidatorsDir { + keys_path, + secrets_path, + format: ValidatorKeysFormat::Lighthouse, + }; + let config = get_signer_config(loader); + let start_config = get_start_signer_config(config, chain, jwts); + + // Run the Signer + tokio::spawn(SigningService::run(start_config)); + + // leave some time to start servers + tokio::time::sleep(Duration::from_millis(100)).await; + + // TODO: simple client to test the JWT auth endpoint + + Ok(()) +} From dc73c6215d604cd6f0165801ac7213b462953ebd Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 00:52:33 -0400 Subject: [PATCH 25/52] Finished a basic signer module unit test --- tests/tests/signer_jwt_auth.rs | 62 +++++++++++++++++++++++++--------- 1 file changed, 46 insertions(+), 16 deletions(-) diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index 989cdb61..0e9e97eb 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -1,13 +1,19 @@ -use std::{collections::HashMap, fs, time::Duration}; +use std::{collections::HashMap, time::Duration}; +use alloy::{hex, primitives::FixedBytes}; use cb_common::{ + commit::{constants::GET_PUBKEYS_PATH, request::GetPubkeysResponse}, signer::{SignerLoader, ValidatorKeysFormat}, types::{Chain, ModuleId}, + utils::create_jwt, }; use cb_signer::service::SigningService; use cb_tests::utils::{get_signer_config, get_start_signer_config, setup_test_env}; use eyre::Result; -use tempfile::tempdir; +use tracing::info; + +const JWT_MODULE: &str = "test-module"; +const JWT_SECRET: &str = 
"test-jwt-secret"; #[tokio::test] async fn test_signer_jwt_auth_success() -> Result<()> { @@ -15,33 +21,57 @@ async fn test_signer_jwt_auth_success() -> Result<()> { let chain = Chain::Hoodi; // Mock JWT secrets + let module_id = ModuleId(JWT_MODULE.to_string()); let mut jwts = HashMap::new(); - jwts.insert(ModuleId("test-module".to_string()), "test-jwt-secret".to_string()); - - // Create a temp folder and key structure - let test_folder = tempdir()?; - let test_path = test_folder.path(); - let keys_path = test_path.join("keys"); - let secrets_path = test_path.join("secrets"); - fs::create_dir_all(&keys_path)?; - fs::create_dir_all(&secrets_path)?; + jwts.insert(module_id.clone(), JWT_SECRET.to_string()); // Create a signer config let loader = SignerLoader::ValidatorsDir { - keys_path, - secrets_path, + keys_path: "data/keystores/keys".into(), + secrets_path: "data/keystores/secrets".into(), format: ValidatorKeysFormat::Lighthouse, }; let config = get_signer_config(loader); + let host = config.host; + let port = config.port; let start_config = get_start_signer_config(config, chain, jwts); // Run the Signer - tokio::spawn(SigningService::run(start_config)); + let server_handle = tokio::spawn(SigningService::run(start_config)); - // leave some time to start servers + // Make sure the server is running tokio::time::sleep(Duration::from_millis(100)).await; + if server_handle.is_finished() { + return Err(eyre::eyre!( + "Signer service failed to start: {}", + server_handle.await.unwrap_err() + )); + } + + // Create a JWT header + let jwt = create_jwt(&module_id, JWT_SECRET)?; + + // Run a pubkeys request + let client = reqwest::Client::new(); + let url = format!("http://{}:{}{}", host, port, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(jwt).send().await?; + assert!(response.status().is_success(), "Failed to authenticate with JWT"); + let pubkey_json = response.json::().await?; - // TODO: simple client to test the JWT auth endpoint + // Verify the expected pubkeys are returned + assert_eq!(pubkey_json.keys.len(), 2); + let expected_pubkeys = vec![ + FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")), + FixedBytes::new(hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9")), + ]; + for expected in expected_pubkeys { + assert!( + pubkey_json.keys.iter().any(|k| k.consensus == expected), + "Expected pubkey not found: {:?}", + expected + ); + info!("Server returned expected pubkey: {:?}", expected); + } Ok(()) } From 6c3d9670f4ff7d9e6fa7e5b8b497deea01043347 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 01:15:44 -0400 Subject: [PATCH 26/52] Added a JWT failure unit test --- tests/tests/signer_jwt_auth.rs | 49 ++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index 0e9e97eb..fd111814 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -75,3 +75,52 @@ async fn test_signer_jwt_auth_success() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn test_signer_jwt_auth_fail() -> Result<()> { + setup_test_env(); + let chain = Chain::Hoodi; + + // Mock JWT secrets + let module_id = ModuleId(JWT_MODULE.to_string()); + let mut jwts = HashMap::new(); + jwts.insert(module_id.clone(), JWT_SECRET.to_string()); + + // Create a signer config + let loader = SignerLoader::ValidatorsDir { + keys_path: "data/keystores/keys".into(), + 
secrets_path: "data/keystores/secrets".into(), + format: ValidatorKeysFormat::Lighthouse, + }; + let config = get_signer_config(loader); + let host = config.host; + let port = config.port; + let start_config = get_start_signer_config(config, chain, jwts); + + // Run the Signer + let server_handle = tokio::spawn(SigningService::run(start_config)); + + // Make sure the server is running + tokio::time::sleep(Duration::from_millis(100)).await; + if server_handle.is_finished() { + return Err(eyre::eyre!( + "Signer service failed to start: {}", + server_handle.await.unwrap_err() + )); + } + + // Create a JWT header + let jwt = create_jwt(&module_id, "incorrect secret")?; + + // Run a pubkeys request + let client = reqwest::Client::new(); + let url = format!("http://{}:{}{}", host, port, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(jwt).send().await?; + assert!(response.status().is_client_error(), "Failed to authenticate with JWT"); + info!( + "Server returned expected error code {} for invalid JWT: {}", + response.status(), + response.text().await.unwrap_or_else(|_| "No response body".to_string()) + ); + Ok(()) +} From 6464638a443b63e58ed3cfef381210aa13f963b2 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 02:05:28 -0400 Subject: [PATCH 27/52] Added a rate limit test and cleaned up a bit --- tests/tests/signer_jwt_auth.rs | 145 +++++++++++++++++++-------------- 1 file changed, 82 insertions(+), 63 deletions(-) diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index fd111814..961afb3e 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, time::Duration}; use alloy::{hex, primitives::FixedBytes}; use cb_common::{ commit::{constants::GET_PUBKEYS_PATH, request::GetPubkeysResponse}, + config::StartSignerConfig, signer::{SignerLoader, ValidatorKeysFormat}, types::{Chain, ModuleId}, utils::create_jwt, @@ -10,6 +11,7 @@ use cb_common::{ use cb_signer::service::SigningService; use cb_tests::utils::{get_signer_config, get_start_signer_config, setup_test_env}; use eyre::Result; +use reqwest::{Response, StatusCode}; use tracing::info; const JWT_MODULE: &str = "test-module"; @@ -18,66 +20,75 @@ const JWT_SECRET: &str = "test-jwt-secret"; #[tokio::test] async fn test_signer_jwt_auth_success() -> Result<()> { setup_test_env(); - let chain = Chain::Hoodi; + let module_id = ModuleId(JWT_MODULE.to_string()); + let start_config = start_server().await?; - // Mock JWT secrets + // Run a pubkeys request + let jwt = create_jwt(&module_id, JWT_SECRET)?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(&jwt).send().await?; + + // Verify the expected pubkeys are returned + verify_pubkeys(response).await?; + + Ok(()) +} + +#[tokio::test] +async fn test_signer_jwt_auth_fail() -> Result<()> { + setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let mut jwts = HashMap::new(); - jwts.insert(module_id.clone(), JWT_SECRET.to_string()); + let start_config = start_server().await?; - // Create a signer config - let loader = SignerLoader::ValidatorsDir { - keys_path: "data/keystores/keys".into(), - secrets_path: "data/keystores/secrets".into(), - format: ValidatorKeysFormat::Lighthouse, - }; - let config = get_signer_config(loader); - let host = config.host; - let port = config.port; - let start_config = get_start_signer_config(config, chain, jwts); + // Run a 
pubkeys request - this should fail due to invalid JWT
+    let jwt = create_jwt(&module_id, "incorrect secret")?;
+    let client = reqwest::Client::new();
+    let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH);
+    let response = client.get(&url).bearer_auth(&jwt).send().await?;
+    assert!(response.status() == StatusCode::UNAUTHORIZED);
     info!(
         "Server returned expected error code {} for invalid JWT: {}",
         response.status(),
         response.text().await.unwrap_or_else(|_| "No response body".to_string())
     );
     Ok(())
 }
 
+#[tokio::test]
+async fn test_signer_jwt_rate_limit() -> Result<()> {
+    setup_test_env();
+    let module_id = ModuleId(JWT_MODULE.to_string());
+    let start_config = start_server().await?;
+
+    // Run as many pubkeys requests as the fail limit
+    let jwt = create_jwt(&module_id, "incorrect secret")?;
+    let client = reqwest::Client::new();
+    let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH);
+    for _ in 0..start_config.jwt_auth_fail_limit {
+        let response = client.get(&url).bearer_auth(&jwt).send().await?;
+        assert!(response.status() == StatusCode::UNAUTHORIZED);
+    }
+
+    // Run another request - this should fail due to rate limiting now
+    let jwt = create_jwt(&module_id, JWT_SECRET)?;
+    let response = client.get(&url).bearer_auth(&jwt).send().await?;
+    assert!(response.status() == StatusCode::TOO_MANY_REQUESTS);
+
+    // Wait for the rate limit timeout
+    tokio::time::sleep(Duration::from_secs(start_config.jwt_auth_fail_timeout_seconds as u64))
+        .await;
+
+    // Now the next request should succeed
+    let response = client.get(&url).bearer_auth(&jwt).send().await?;
+    verify_pubkeys(response).await?;
 
     Ok(())
 }
 
-#[tokio::test]
-async fn test_signer_jwt_auth_fail() -> Result<()> {
+// Starts the signer module server on a separate task and returns its
+// configuration
+async fn start_server() -> Result<StartSignerConfig> {
     setup_test_env();
     let chain = Chain::Hoodi;
 
@@ -92,13 +103,13 @@ async fn test_signer_jwt_auth_fail() -> Result<()> {
         secrets_path: "data/keystores/secrets".into(),
         format: ValidatorKeysFormat::Lighthouse,
     };
-    let config = get_signer_config(loader);
-    let host = config.host;
-    let port = config.port;
+    let mut config = get_signer_config(loader);
+    config.jwt_auth_fail_limit = 3; // Set a low fail limit for testing
+    config.jwt_auth_fail_timeout_seconds = 3; // Set a short timeout for testing
     let start_config = get_start_signer_config(config, chain, jwts);
 
     // Run the Signer
-    let server_handle = tokio::spawn(SigningService::run(start_config));
+    let server_handle = tokio::spawn(SigningService::run(start_config.clone()));
 
     // Make sure the server is running
     tokio::time::sleep(Duration::from_millis(100)).await;
@@ -108,19 +119,27 @@ async fn test_signer_jwt_auth_fail() -> Result<()> {
             server_handle.await.unwrap_err()
         ));
     }
+    Ok(start_config)
+}
 
-    // Create a JWT header
-    let jwt = create_jwt(&module_id, "incorrect secret")?;
-
-    // Run a pubkeys request
-    let client = reqwest::Client::new();
-    let url = format!("http://{}:{}{}", host, port, GET_PUBKEYS_PATH);
-    let response = client.get(&url).bearer_auth(jwt).send().await?;
-    assert!(response.status().is_client_error(), "Failed to authenticate with JWT");
-    info!(
-        "Server returned expected error code {} for invalid JWT: {}",
-        response.status(),
-        response.text().await.unwrap_or_else(|_| "No response body".to_string())
-    );
+// Verifies that the pubkeys returned by the server match the pubkeys in the
+// test data
+async fn verify_pubkeys(response: Response) -> Result<()> {
+    // Verify the expected pubkeys are returned
+    assert!(response.status() == StatusCode::OK);
+    let pubkey_json = response.json::<GetPubkeysResponse>().await?;
+    assert_eq!(pubkey_json.keys.len(), 2);
+    let expected_pubkeys = vec![
+        FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")),
+        FixedBytes::new(hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9")),
+    ];
+    for expected in expected_pubkeys {
+        assert!(
+            pubkey_json.keys.iter().any(|k| k.consensus == expected),
+            "Expected pubkey not found: {:?}",
+            expected
+        );
+        info!("Server returned expected pubkey: {:?}", expected);
+    }
     Ok(())
 }

From 0313f18c27880a85d5c9af7698884f27b7cf895e Mon Sep 17 00:00:00 2001
From: Joe Clapis
Date: Wed, 28 May 2025 03:30:34 -0400
Subject: [PATCH 28/52] Added unique ports to unit tests for parallel execution

---
 tests/tests/signer_jwt_auth.rs | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs
index 961afb3e..90a0365f 100644
--- a/tests/tests/signer_jwt_auth.rs
+++ b/tests/tests/signer_jwt_auth.rs
@@ -21,7 +21,7 @@ const JWT_SECRET: &str = "test-jwt-secret";
 async fn test_signer_jwt_auth_success() -> Result<()> {
     setup_test_env();
     let module_id = ModuleId(JWT_MODULE.to_string());
-    let start_config = start_server().await?;
+    let start_config = start_server(20100).await?;
 
     // Run a pubkeys request
     let jwt = create_jwt(&module_id, JWT_SECRET)?;
@@ -39,7 +39,7 @@ async fn test_signer_jwt_auth_success() -> Result<()> {
 async fn test_signer_jwt_auth_fail() -> Result<()> {
     setup_test_env();
     let module_id = ModuleId(JWT_MODULE.to_string());
-    let start_config = start_server().await?;
+    let start_config = start_server(20200).await?;
 
     // Run a pubkeys request - this should fail due to invalid JWT
     let jwt = create_jwt(&module_id, "incorrect secret")?;
@@ -59,7 +59,7 @@ async fn test_signer_jwt_auth_fail() -> Result<()> {
 async fn test_signer_jwt_rate_limit() -> Result<()> {
     setup_test_env();
     let module_id = ModuleId(JWT_MODULE.to_string());
-    let start_config = start_server().await?;
+    let start_config = start_server(20300).await?;
 
     // Run as many pubkeys requests as the fail limit
     let jwt = create_jwt(&module_id, "incorrect secret")?;
     let client = reqwest::Client::new();
     let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH);
@@ -88,7 +88,7 @@ async fn test_signer_jwt_rate_limit() -> Result<()> {
 // Starts the signer module server on a separate task and returns its
 // configuration
-async fn start_server() -> Result<StartSignerConfig> {
+async fn start_server(port: u16) -> Result<StartSignerConfig> {
     setup_test_env();
     let chain = Chain::Hoodi;
 
@@ -104,6 +104,7 @@ async fn start_server() -> Result<StartSignerConfig> {
         format: ValidatorKeysFormat::Lighthouse,
     };
     let mut config = get_signer_config(loader);
+    config.port = port;
     config.jwt_auth_fail_limit = 3; // Set a low fail limit for testing
     config.jwt_auth_fail_timeout_seconds = 3; // Set a short timeout for testing
     let start_config = get_start_signer_config(config, chain, jwts);

From 346eea4c0ee7c6e7e53ec1f6c950e3289be214dd Mon Sep 17 00:00:00 2001
From: Joe Clapis
Date: Wed, 28 May 2025 16:36:11 -0400
Subject: [PATCH 29/52] Cleaned up the build Dockerfile and removed an extra dependency layer

---
 provisioning/build.Dockerfile | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/provisioning/build.Dockerfile b/provisioning/build.Dockerfile
index 34ad27a5..43713cc5 100644
--- a/provisioning/build.Dockerfile
+++ b/provisioning/build.Dockerfile
@@ -1,7 +1,10 @@
 # This will be the main build image
-FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef
+FROM --platform=${BUILDPLATFORM} rust:1.83-slim-bookworm AS chef
 ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE
+ENV CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse
 WORKDIR /app
+RUN cargo install cargo-chef --locked && \
+    rm -rf $CARGO_HOME/registry/
 
 FROM --platform=${BUILDPLATFORM} chef AS planner
 ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE
@@ -20,8 +23,8 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \
         rustup target add aarch64-unknown-linux-gnu && \
         dpkg --add-architecture arm64 && \
         apt update && \
-        apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \
-        echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \
+        apt install -y gcc-aarch64-linux-gnu && \
+        echo '#!/bin/sh' > ${BUILD_VAR_SCRIPT} && \
         echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \
         echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \
         echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \
@@ -35,8 +38,8 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \
         rustup target add x86_64-unknown-linux-gnu && \
         dpkg --add-architecture amd64 && \
         apt update && \
-        apt install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 && \
-        echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \
+        apt install -y gcc-x86-64-linux-gnu && \
+        echo '#!/bin/sh' > ${BUILD_VAR_SCRIPT} && \
         echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \
         echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \
         echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \
@@ -49,12 +52,14 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \
 
 # Run cook to prep the build
 RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \
+        chmod +x ${BUILD_VAR_SCRIPT} && \
        . 
${BUILD_VAR_SCRIPT} && \ echo "Cross-compilation environment set up for ${TARGET}"; \ else \ echo "No cross-compilation needed"; \ fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ + apt update && \ + apt install -y git libssl-dev:${TARGETARCH} zlib1g-dev:${TARGETARCH} pkg-config && \ cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json # Get the latest Protoc since the one in the Debian repo is incredibly old From 7b20d2f885efa8591d834d1deebb7b550d89683d Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 29 May 2025 05:03:25 -0400 Subject: [PATCH 30/52] Ported the build script over to the justfile --- build-linux.sh | 144 ------------------------ docs/docs/get_started/building.md | 28 ++--- justfile | 175 +++++++++++++++++++++++++++++- provisioning/pbs.Dockerfile | 2 +- provisioning/signer.Dockerfile | 2 +- 5 files changed, 185 insertions(+), 166 deletions(-) delete mode 100755 build-linux.sh diff --git a/build-linux.sh b/build-linux.sh deleted file mode 100755 index a7266bd9..00000000 --- a/build-linux.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/bin/bash - -# This script will build the Commit-Boost applications and modules for local Linux development. - -# ================= -# === Functions === -# ================= - -# Print a failure message to stderr and exit -fail() { - MESSAGE=$1 - RED='\033[0;31m' - RESET='\033[;0m' - >&2 echo -e "\n${RED}**ERROR**\n$MESSAGE${RESET}\n" - exit 1 -} - - -# Builds the CLI binaries for Linux -# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static -build_cli() { - echo "Building CLI binaries..." - docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-cli . || fail "Error building CLI." - echo "done!" - - # Flatten the folder structure for easier referencing - mv build/$VERSION/linux_amd64/commit-boost-cli build/$VERSION/commit-boost-cli-linux-amd64 - mv build/$VERSION/linux_arm64/commit-boost-cli build/$VERSION/commit-boost-cli-linux-arm64 - - # Clean up the empty directories - rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 - echo "done!" -} - - -# Builds the PBS module binaries for Linux and the Docker image(s) -# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static -build_pbs() { - echo "Building PBS binaries..." - docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-pbs . || fail "Error building PBS binaries." - echo "done!" - - # Flatten the folder structure for easier referencing - mv build/$VERSION/linux_amd64/commit-boost-pbs build/$VERSION/commit-boost-pbs-linux-amd64 - mv build/$VERSION/linux_arm64/commit-boost-pbs build/$VERSION/commit-boost-pbs-linux-arm64 - - # Clean up the empty directories - rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 - - echo "Building PBS Docker image..." - # If uploading, make and push a manifest - if [ "$LOCAL_UPLOAD" = true ]; then - if [ -z "$LOCAL_DOCKER_REGISTRY" ]; then - fail "LOCAL_DOCKER_REGISTRY must be set to upload to a local registry." - fi - docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/$VERSION -t $LOCAL_DOCKER_REGISTRY/commit-boost/pbs:$VERSION -f provisioning/pbs.Dockerfile --push . || fail "Error building PBS image." 
- else - docker buildx build --rm --load --build-arg BINARIES_PATH=build/$VERSION -t commit-boost/pbs:$VERSION -f provisioning/pbs.Dockerfile . || fail "Error building PBS image." - fi - echo "done!" -} - - -# Builds the Signer module binaries for Linux and the Docker image(s) -# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static -build_signer() { - echo "Building Signer binaries..." - docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-signer . || fail "Error building Signer binaries." - echo "done!" - - # Flatten the folder structure for easier referencing - mv build/$VERSION/linux_amd64/commit-boost-signer build/$VERSION/commit-boost-signer-linux-amd64 - mv build/$VERSION/linux_arm64/commit-boost-signer build/$VERSION/commit-boost-signer-linux-arm64 - - # Clean up the empty directories - rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 - - echo "Building Signer Docker image..." - # If uploading, make and push a manifest - if [ "$LOCAL_UPLOAD" = true ]; then - if [ -z "$LOCAL_DOCKER_REGISTRY" ]; then - fail "LOCAL_DOCKER_REGISTRY must be set to upload to a local registry." - fi - docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/$VERSION -t $LOCAL_DOCKER_REGISTRY/commit-boost/signer:$VERSION -f provisioning/signer.Dockerfile --push . || fail "Error building Signer image." - else - docker buildx build --rm --load --build-arg BINARIES_PATH=build/$VERSION -t commit-boost/signer:$VERSION -f provisioning/signer.Dockerfile . || fail "Error building Signer image." - fi - echo "done!" -} - - -# Print usage -usage() { - echo "Usage: build.sh [options] -v " - echo "This script assumes it is in the commit-boost-client repository directory." - echo "Options:" - echo $'\t-a\tBuild all of the artifacts (CLI, PBS, and Signer, along with Docker images)' - echo $'\t-c\tBuild the Commit-Boost CLI binaries' - echo $'\t-p\tBuild the PBS module binary and its Docker container' - echo $'\t-s\tBuild the Signer module binary and its Docker container' - echo $'\t-o\tWhen passed with a build, upload the resulting image tags to a local Docker registry specified in $LOCAL_DOCKER_REGISTRY' - exit 0 -} - - -# ================= -# === Main Body === -# ================= - -# Parse arguments -while getopts "acpsov:" FLAG; do - case "$FLAG" in - a) CLI=true PBS=true SIGNER=true ;; - c) CLI=true ;; - p) PBS=true ;; - s) SIGNER=true ;; - o) LOCAL_UPLOAD=true ;; - v) VERSION="$OPTARG" ;; - *) usage ;; - esac -done -if [ -z "$VERSION" ]; then - usage -fi - -# Cleanup old artifacts -rm -rf build/$VERSION/* -mkdir -p build/$VERSION - -# Make a multiarch builder, ignore if it's already there -docker buildx create --name multiarch-builder --driver docker-container --use > /dev/null 2>&1 -# NOTE: if using a local repo with a private CA, you will have to follow these steps to add the CA to the builder: -# https://stackoverflow.com/a/73585243 - -# Build the artifacts -if [ "$CLI" = true ]; then - build_cli -fi -if [ "$PBS" = true ]; then - build_pbs -fi -if [ "$SIGNER" = true ]; then - build_signer -fi diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md index d38b447f..edf795b2 100644 --- a/docs/docs/get_started/building.md +++ b/docs/docs/get_started/building.md @@ -5,31 +5,27 @@ Commit-Boost's components are all written in [Rust](https://www.rust-lang.org/). 
## Building via the Docker Builder

-For convenience, Commit-Boost has Dockerized the build environment for Linux `x64` and `arm64` platforms. All of the prerequisites, cross-compilation tooling, and configuration are handled by the builder image. If you would like to build the CLI, PBS module, or Signer binaries and Docker images from source, you are welcome to use the Docker builder process.
+For convenience, Commit-Boost has Dockerized the build environment for Linux `x64` and `arm64` platforms. It utilizes Docker's powerful [buildx](https://docs.docker.com/reference/cli/docker/buildx/) system. All of the prerequisites, cross-compilation tooling, and configuration are handled by the builder image. If you would like to build the CLI, PBS module, or Signer binaries and Docker images from source, you are welcome to use the Docker builder process.

To use the builder, you will need to have [Docker Engine](https://docs.docker.com/engine/install/) installed on your system. Please follow the instructions to install it first.

:::note
-The build script assumes that you've added your user account to the `docker` group with the Linux [post-install steps](https://docs.docker.com/engine/install/linux-postinstall/). If you haven't, then you'll need to run the build script below as `root` or modify it so each call to `docker` within it is run as the root user (e.g., with `sudo`).
+The build system assumes that you've added your user account to the `docker` group with the Linux [post-install steps](https://docs.docker.com/engine/install/linux-postinstall/). If you haven't, then you'll need to run the build script below as `root` or modify it so each call to `docker` within it is run as the root user (e.g., with `sudo`).
:::

-We provide a build script called `build-linux.sh` to automate the process:
+The Docker builder is built into the project's `justfile` which is used to invoke many facets of Commit Boost development. To use it, you'll need to install [Just](https://github.com/casey/just) on your system.

+Use `just --list` to show all of the actions - there are many. The `justfile` provides granular actions, called "recipes", for building just the binaries of a specific crate (such as the CLI, `pbs`, or `signer`), as well as actions to build the Docker images for the PBS and Signer modules.
+
+Below is a brief summary of the relevant ones for building the Commit-Boost artifacts:
+
+- `build-all <version>` will build the `commit-boost-cli`, `commit-boost-pbs`, and `commit-boost-signer` binaries for your local system architecture. It will also create Docker images called `commit-boost/pbs:<version>` and `commit-boost/signer:<version>` and load them into your local Docker registry for use.
+- `build-cli-bin <version>`, `build-pbs-bin <version>`, and `build-signer-bin <version>` can be used to create the `commit-boost-cli`, `commit-boost-pbs`, and `commit-boost-signer` binaries, respectively.
+- `build-pbs-img <version>` and `build-signer-img <version>` can be used to create the Docker images for the PBS and Signer modules, respectively.
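For example, a hypothetical local workflow using these recipes might look like the following (the version tag here is arbitrary):

```bash
# Build all binaries for the local architecture and load the
# commit-boost/pbs:v0.1.0-test and commit-boost/signer:v0.1.0-test images
just build-all v0.1.0-test

# Rebuild only the signer binary, then recreate its Docker image
just build-signer-bin v0.1.0-test
just build-signer-img v0.1.0-test
```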
-The script utilizes Docker's [buildx](https://docs.docker.com/reference/cli/docker/buildx/) system to both create a multiarch-capable builder and cross-compile for both Linux architectures. You are free to modify it to produce only the artifacts relevant to you if so desired.
+The `version` provided will be used to house the output binaries in `./build/<version>`, and act as the version tag for the Docker images when they're added to your local system or uploaded to your local Docker repository.
 
-The `version` provided will be used to house the output binaries in `./build/$VERSION`, and act as the version tag for the Docker images when they're added to your local system or uploaded to your local Docker repository.
+If you're interested in building the binaries and/or Docker images for multiple architectures (currently Linux `amd64` and `arm64`), use the variants of those recipes that have the `-multiarch` suffix. Note that building a multiarch Docker image manifest will require the use of a [custom Docker registry](https://www.digitalocean.com/community/tutorials/how-to-set-up-a-private-docker-registry-on-ubuntu-20-04), as the local registry built into Docker does not have multiarch manifest support.
 
## Building Manually
 
diff --git a/justfile b/justfile
index d13e76ae..ac1314fc 100644
--- a/justfile
+++ b/justfile
@@ -12,16 +12,183 @@ fmt-check:
clippy:
    cargo +{{toolchain}} clippy --all-features --no-deps -- -D warnings
 
-docker-build-pbs:
-    docker build -t commitboost_pbs_default . -f ./provisioning/pbs.Dockerfile
+# ===================================
+# === Build Commands for Services ===
+# ===================================
 
-docker-build-signer:
-    docker build -t commitboost_signer . -f ./provisioning/signer.Dockerfile
+[doc("""
+    Builds the commit-boost-cli binary to './build/<version>'.
+""")]
+build-cli version: \
+    (_docker-build-binary version "cli")
+
+[doc("""
+    Builds amd64 and arm64 binaries for the commit-boost-cli crate to './build/<version>/<platform>/', where '<platform>' is
+    the OS / arch platform of the binary (linux_amd64 and linux_arm64).
+""")]
+build-cli-multiarch version: \
+    (_docker-build-binary-multiarch version "cli")
+
+[doc("""
+    Builds the commit-boost-pbs binary to './build/<version>'.
+""")]
+build-pbs-bin version: \
+    (_docker-build-binary version "pbs")
+
+[doc("""
+    Creates a Docker image named 'commit-boost/pbs:<version>' and loads it to the local Docker repository.
+    Requires the binary to be built first, but this command won't build it automatically if you just need to build the
+    Docker image without recompiling the binary.
+""")]
+build-pbs-img version: \
+    (_docker-build-image version "pbs")
+
+[doc("""
+    Builds the commit-boost-pbs binary to './build/<version>' and creates a Docker image named 'commit-boost/pbs:<version>'.
+""")]
+build-pbs version: \
+    (build-pbs-bin version) \
+    (build-pbs-img version)
+
+[doc("""
+    Builds amd64 and arm64 binaries for the commit-boost-pbs crate to './build/<version>/<platform>/', where '<platform>' is the
+    OS / arch platform of the binary (linux_amd64 and linux_arm64).
+    Used when creating the pbs Docker image.
+""")]
+build-pbs-bin-multiarch version: \
+    (_docker-build-binary-multiarch version "pbs")
+
+[doc("""
+    Creates a multiarch Docker image manifest named 'commit-boost/pbs:<version>' and pushes it to a custom Docker registry
+    (such as '192.168.1.10:5000').
+    Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub.
+""")] +build-pbs-img-multiarch version local-docker-registry: \ + (_docker-build-image-multiarch version "pbs" local-docker-registry) + +[doc(""" + Builds amd64 and arm64 binaries for the commit-boost-pbs crate to './build//', where '' is the + OS / arch platform of the binary (linux_amd64 and linux_arm64). + Creates a multiarch Docker image manifest named 'commit-boost/pbs:' and pushes it to a custom Docker registry + (such as '192.168.1.10:5000'). + Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. +""")] +build-pbs-multiarch version local-docker-registry: \ + (build-pbs-bin-multiarch version) \ + (build-pbs-img-multiarch version local-docker-registry) + +[doc(""" + Builds the commit-boost-signer binary to './build/'. +""")] +build-signer-bin version: \ + (_docker-build-binary version "signer") + +[doc(""" + Creates a Docker image named 'commit-boost/signer:' and loads it to the local Docker repository. + Requires the binary to be built first, but this command won't build it automatically if you just need to build the + Docker image without recompiling the binary. +""")] +build-signer-img version: \ + (_docker-build-image version "signer") + +[doc(""" + Builds the commit-boost-signer binary to './build/' and creates a Docker image named 'commit-boost/signer:'. +""")] +build-signer version: \ + (build-signer-bin version) \ + (build-signer-img version) + +[doc(""" + Builds amd64 and arm64 binaries for the commit-boost-signer crate to './build//', where '' is + the OS / arch platform of the binary (linux_amd64 and linux_arm64). + Used when creating the signer Docker image. +""")] +build-signer-bin-multiarch version: \ + (_docker-build-binary-multiarch version "signer") + +[doc(""" + Creates a multiarch Docker image manifest named 'commit-boost/signer:' and pushes it to a custom Docker registry + (such as '192.168.1.10:5000'). + Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. +""")] +build-signer-img-multiarch version local-docker-registry: \ + (_docker-build-image-multiarch version "signer" local-docker-registry) + +[doc(""" + Builds amd64 and arm64 binaries for the commit-boost-signer crate to './build//', where '' is + the OS / arch platform of the binary (linux_amd64 and linux_arm64). + Creates a multiarch Docker image manifest named 'commit-boost/signer:' and pushes it to a custom Docker registry + (such as '192.168.1.10:5000'). + Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. +""")] +build-signer-multiarch version local-docker-registry: \ + (build-signer-bin-multiarch version) \ + (build-signer-img-multiarch version local-docker-registry) + +[doc(""" + Builds the CLI, PBS, and Signer binaries and Docker images for the specified version. + The binaries will be placed in './build/'. + The Docker images will be named 'commit-boost/cli:', 'commit-boost/pbs:', and + 'commit-boost/signer:'. +""")] +build-all version: \ + (build-cli version) \ + (build-pbs version) \ + (build-signer version) + +[doc(""" + Builds amd64 and arm64 flavors of the CLI, PBS, and Signer binaries and Docker images for the specified version. + The binaries will be placed in './build//', where '' is the + OS / arch platform of the binary (linux_amd64 and linux_arm64). + Also creates multiarch Docker image manifests for each crate and pushes them to a custom Docker registry + (such as '192.168.1.10:5000'). 
+ Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. +""")] +build-all-multiarch version local-docker-registry: \ + (build-cli-multiarch version) \ + (build-pbs-multiarch version local-docker-registry) \ + (build-signer-multiarch version local-docker-registry) + +# =============================== +# === Builder Implementations === +# =============================== + +# Creates a Docker buildx builder if it doesn't already exist +_create-docker-builder: + docker buildx create --name multiarch-builder --driver docker-container --use > /dev/null 2>&1 || true + +# Builds a binary for a specific crate and version +_docker-build-binary version crate: _create-docker-builder + export PLATFORM=$(docker buildx inspect --bootstrap | awk -F': ' '/Platforms/ {print $2}' | cut -d',' -f1 | xargs | tr '/' '_'); \ + docker buildx build --rm --platform=local -f provisioning/build.Dockerfile --output "build/{{version}}/$PLATFORM" --target output --build-arg TARGET_CRATE=commit-boost-{{crate}} . + +# Builds a Docker image for a specific crate and version +_docker-build-image version crate: _create-docker-builder + docker buildx build --rm --load --build-arg BINARIES_PATH=build/{{version}} -t commit-boost/{{crate}}:{{version}} -f provisioning/{{crate}}.Dockerfile . + +# Builds multiple binaries (for Linux amd64 and arm64 architectures) for a specific crate and version +_docker-build-binary-multiarch version crate: _create-docker-builder + docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/{{version}} --target output --build-arg TARGET_CRATE=commit-boost-{{crate}} . + +# Builds a multi-architecture (Linux amd64 and arm64) Docker manifest for a specific crate and version. +# Uploads to the custom Docker registry (such as '192.168.1.10:5000') instead of a public registry like GHCR or Docker Hub. +_docker-build-image-multiarch version crate local-docker-registry: _create-docker-builder + docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/{{version}} -t {{local-docker-registry}}/commit-boost/{{crate}}:{{version}} -f provisioning/{{crate}}.Dockerfile --push . + +# ================= +# === Utilities === +# ================= docker-build-test-modules: docker build -t test_da_commit . -f examples/da_commit/Dockerfile docker build -t test_builder_log . -f examples/builder_log/Dockerfile docker build -t test_status_api . -f examples/status_api/Dockerfile +# Cleans the build directory, removing all built binaries. +# Docker images are not removed by this command. +clean: + rm -rf build + +# Runs the suite of tests for all commit-boost crates. 
test: cargo test --all-features \ No newline at end of file diff --git a/provisioning/pbs.Dockerfile b/provisioning/pbs.Dockerfile index 9eb72702..6b9496ec 100644 --- a/provisioning/pbs.Dockerfile +++ b/provisioning/pbs.Dockerfile @@ -1,6 +1,6 @@ FROM debian:bookworm-slim ARG BINARIES_PATH TARGETOS TARGETARCH -COPY ${BINARIES_PATH}/commit-boost-pbs-${TARGETOS}-${TARGETARCH} /usr/local/bin/commit-boost-pbs +COPY ${BINARIES_PATH}/${TARGETOS}_${TARGETARCH}/commit-boost-pbs /usr/local/bin/commit-boost-pbs RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 05679762..5ea619b2 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,6 +1,6 @@ FROM debian:bookworm-slim ARG BINARIES_PATH TARGETOS TARGETARCH -COPY ${BINARIES_PATH}/commit-boost-signer-${TARGETOS}-${TARGETARCH} /usr/local/bin/commit-boost-signer +COPY ${BINARIES_PATH}/${TARGETOS}_${TARGETARCH}/commit-boost-signer /usr/local/bin/commit-boost-signer RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ From ca9f4a1997103e81427d3c9ca04a54317ce9fb2b Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 29 May 2025 16:08:50 -0400 Subject: [PATCH 31/52] Added a justfile recipe for installing protoc --- docs/docs/get_started/building.md | 19 ++++++------------- justfile | 3 +++ provisioning/protoc.sh | 11 +++++++---- 3 files changed, 16 insertions(+), 17 deletions(-) diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md index edf795b2..a00b36cf 100644 --- a/docs/docs/get_started/building.md +++ b/docs/docs/get_started/building.md @@ -53,24 +53,17 @@ sudo apt update && sudo apt install -y openssl ca-certificates libssl3 libssl-de Install the Protobuf compiler: :::note -While many package repositories provide a `protobuf-compiler` package in lieu of manually installing protoc, we've found at the time of this writing that most of them use v3.21 which is quite out of date. We recommend getting the latest version manually. +While many package repositories provide a `protobuf-compiler` package in lieu of manually installing protoc, we've found at the time of this writing that Debian-based ones use v3.21 which is quite out of date. We recommend getting the latest version manually. ::: +We provide a convenient recipe to install the latest version directly from the GitHub releases page: + ```bash -PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') -MACHINE_ARCH=$(uname -m) -case "${MACHINE_ARCH}" in - aarch64) PROTOC_ARCH=aarch_64;; - x86_64) PROTOC_ARCH=x86_64;; - *) echo "${MACHINE_ARCH} is not supported."; exit 1;; -esac -curl -sLo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip -sudo unzip -q protoc.zip bin/protoc -d /usr -sudo unzip -q protoc.zip "include/google/*" -d /usr -sudo chmod a+x /usr/bin/protoc -rm -rf protoc.zip +just install-protoc ``` +This works on OSX and Linux systems, but you are welcome to download and install it manually as well. 
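+
+You can verify the result afterwards with `protoc --version`; the exact version printed will vary over time, so the output below is only illustrative:
+
+```bash
+$ protoc --version
+libprotoc 31.1
+```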
+ With the prerequisites set up, pull the repository: ```bash git clone https://github.com/Commit-Boost/commit-boost-client diff --git a/justfile b/justfile index ac1314fc..ee5f4c2d 100644 --- a/justfile +++ b/justfile @@ -179,6 +179,9 @@ _docker-build-image-multiarch version crate local-docker-registry: _create-docke # === Utilities === # ================= +install-protoc: + provisioning/protoc.sh + docker-build-test-modules: docker build -t test_da_commit . -f examples/da_commit/Dockerfile docker build -t test_builder_log . -f examples/builder_log/Dockerfile diff --git a/provisioning/protoc.sh b/provisioning/protoc.sh index 7f66a656..a727a7c1 100755 --- a/provisioning/protoc.sh +++ b/provisioning/protoc.sh @@ -21,7 +21,10 @@ case "$(uname)" in Linux*) PROTOC_OS="linux" ; TARGET_DIR="/usr" ; # Assumes the script is run as root or the user can do it manually - apt update && apt install -y unzip curl ca-certificates jq ;; + if [ $(id -u) != "0" ]; then + CMD_PREFIX="sudo " ; + fi + ${CMD_PREFIX}apt update && ${CMD_PREFIX}apt install -y unzip curl ca-certificates jq ;; *) echo "Unsupported OS: $(uname)" ; exit 1 ;; @@ -50,8 +53,8 @@ echo "Installing protoc: $PROTOC_VERSION-$PROTOC_OS-$PROTOC_ARCH" # Download and install protoc curl --retry 10 --retry-delay 2 --retry-all-errors -fsLo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-$PROTOC_OS-$PROTOC_ARCH.zip || fail "Failed to download protoc" -unzip -q protoc.zip bin/protoc -d $TARGET_DIR || fail "Failed to unzip protoc" -unzip -q protoc.zip "include/google/*" -d $TARGET_DIR || fail "Failed to unzip protoc includes" -chmod a+x $TARGET_DIR/bin/protoc || fail "Failed to set executable permissions for protoc" +${CMD_PREFIX}unzip -qo protoc.zip bin/protoc -d $TARGET_DIR || fail "Failed to unzip protoc" +${CMD_PREFIX}unzip -qo protoc.zip "include/google/*" -d $TARGET_DIR || fail "Failed to unzip protoc includes" +${CMD_PREFIX}chmod a+x $TARGET_DIR/bin/protoc || fail "Failed to set executable permissions for protoc" rm -rf protoc.zip || fail "Failed to remove protoc zip file" echo "protoc ${PROTOC_VERSION} installed successfully for ${PROTOC_OS} ${PROTOC_ARCH}" \ No newline at end of file From 612b0720bebe05bb3d5bf19b808be92c3f950952 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 4 Jun 2025 04:03:46 -0400 Subject: [PATCH 32/52] Added chunked reading to some HTTP response handlers --- Cargo.lock | 1 + Cargo.toml | 1 + crates/common/Cargo.toml | 1 + crates/common/src/config/constants.rs | 8 +++++ crates/common/src/config/mux.rs | 45 ++++++++++++++++++++------- crates/common/src/config/utils.rs | 37 +++++++++++++++++++++- 6 files changed, 80 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 98f4f9f4..a88f994a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1485,6 +1485,7 @@ dependencies = [ "base64 0.22.1", "bimap", "blst", + "bytes", "cipher 0.4.4", "ctr 0.9.2", "derive_more 2.0.1", diff --git a/Cargo.toml b/Cargo.toml index cdaf6ff1..59d1071e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,6 +25,7 @@ base64 = "0.22.1" bimap = { version = "0.6.3", features = ["serde"] } blsful = "2.5" blst = "0.3.11" +bytes = "1.10.1" cb-cli = { path = "crates/cli" } cb-common = { path = "crates/common" } cb-metrics = { path = "crates/metrics" } diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index df78b046..7561e9ac 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -13,6 +13,7 @@ axum.workspace = true base64.workspace = true bimap.workspace = 
true
blst.workspace = true
+bytes.workspace = true
cipher.workspace = true
ctr.workspace = true
derive_more.workspace = true
diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs
index 422af7e7..1b7b4025 100644
--- a/crates/common/src/config/constants.rs
+++ b/crates/common/src/config/constants.rs
@@ -69,6 +69,14 @@ pub const PROXY_DIR_KEYS_DEFAULT: &str = "/proxy_keys";
pub const PROXY_DIR_SECRETS_ENV: &str = "CB_PROXY_SECRETS_DIR";
pub const PROXY_DIR_SECRETS_DEFAULT: &str = "/proxy_secrets";
 
+////////////////////////// MUXER //////////////////////////
+
+/// Timeout for Muxer HTTP requests, in seconds
+pub const MUXER_HTTP_TIMEOUT_DEFAULT: u64 = 10;
+
+/// Max content length for Muxer HTTP responses, in bytes
+pub const MUXER_HTTP_MAX_LENGTH: u64 = 1024 * 1024 * 10; // 10 MiB
+
///////////////////////// MODULES /////////////////////////
 
/// The unique ID of the module
diff --git a/crates/common/src/config/mux.rs b/crates/common/src/config/mux.rs
index 7510102c..4b3bb3c1 100644
--- a/crates/common/src/config/mux.rs
+++ b/crates/common/src/config/mux.rs
@@ -2,6 +2,7 @@ use std::{
     collections::{HashMap, HashSet},
     path::{Path, PathBuf},
     sync::Arc,
+    time::Duration,
 };
 
 use alloy::{
@@ -16,7 +17,11 @@ use tracing::{debug, info};
 use url::Url;
 
 use super::{load_optional_env_var, PbsConfig, RelayConfig, MUX_PATH_ENV};
-use crate::{pbs::RelayClient, types::Chain};
+use crate::{
+    config::{safe_read_http_response, MUXER_HTTP_TIMEOUT_DEFAULT},
+    pbs::RelayClient,
+    types::Chain,
+};
 
 #[derive(Debug, Deserialize, Serialize)]
 pub struct PbsMuxes {
@@ -142,10 +147,12 @@ pub enum MuxKeysLoader {
     File(PathBuf),
     HTTP {
         url: String,
+        timeout: Option<u64>,
     },
     Registry {
         registry: NORegistry,
         node_operator_id: u64,
+        timeout: Option<u64>,
     },
 }
 
@@ -174,15 +181,21 @@ impl MuxKeysLoader {
                 serde_json::from_str(&file).wrap_err("failed to parse mux keys file")
             }
 
-            Self::HTTP { url } => {
-                let client = reqwest::Client::new();
+            Self::HTTP { url, timeout } => {
+                let url = Url::parse(url).wrap_err("failed to parse mux keys URL")?;
+                if url.scheme() != "https" {
+                    bail!("mux keys URL must use HTTPS");
+                }
+                let client = reqwest::ClientBuilder::new()
+                    .timeout(Duration::from_secs(timeout.unwrap_or(MUXER_HTTP_TIMEOUT_DEFAULT)))
+                    .build()?;
                 let response = client.get(url).send().await?;
-                let pubkeys = response.text().await?;
+                let pubkeys = safe_read_http_response(response).await?;
                 serde_json::from_str(&pubkeys)
-                    .wrap_err("failed to fetch mux keys from http endpoint")
+                    .wrap_err("failed to fetch mux keys from HTTP endpoint")
             }
 
-            Self::Registry { registry, node_operator_id } => match registry {
+            Self::Registry { registry, node_operator_id, timeout } => match registry {
                 NORegistry::Lido => {
                     let Some(rpc_url) = rpc_url else {
                         bail!("Lido registry requires RPC URL to be set in the PBS config");
@@ -190,7 +203,9 @@ impl MuxKeysLoader {
                     fetch_lido_registry_keys(rpc_url, chain, U256::from(*node_operator_id)).await
                 }
-                NORegistry::SSV => fetch_ssv_pubkeys(chain, U256::from(*node_operator_id)).await,
+                NORegistry::SSV => {
+                    fetch_ssv_pubkeys(chain, U256::from(*node_operator_id), timeout).await
+                }
             },
         }
     }
@@ -286,6 +301,7 @@ async fn fetch_lido_registry_keys(
 async fn fetch_ssv_pubkeys(
     chain: Chain,
     node_operator_id: U256,
+    timeout: &Option<u64>,
 ) -> eyre::Result<Vec<BlsPublicKey>> {
     const MAX_PER_PAGE: usize = 100;
 
@@ -296,7 +312,9 @@ async fn fetch_ssv_pubkeys(
         _ => bail!("SSV network is not supported for chain: {chain:?}"),
     };
 
-    let client = reqwest::Client::new();
+    let client =
reqwest::ClientBuilder::new()
+        .timeout(Duration::from_secs(timeout.unwrap_or(MUXER_HTTP_TIMEOUT_DEFAULT)))
+        .build()?;
     let mut pubkeys: Vec<BlsPublicKey> = vec![];
 
     let mut page = 1;
@@ -308,9 +326,12 @@ async fn fetch_ssv_pubkeys(
             ))
             .send()
             .await
-            .map_err(|e| eyre::eyre!("Error sending request to SSV network API: {e}"))?
-            .json::<SSVResponse>()
-            .await?;
+            .map_err(|e| eyre::eyre!("Error sending request to SSV network API: {e}"))?;
+
+        // Parse the response as JSON
+        let body_string = safe_read_http_response(response).await?;
+        let response = serde_json::from_slice::<SSVResponse>(body_string.as_bytes())
+            .wrap_err("failed to parse SSV response")?;
 
         pubkeys.extend(response.validators.iter().map(|v| v.pubkey).collect::<Vec<_>>());
         page += 1;
@@ -393,7 +414,7 @@ mod tests {
         let chain = Chain::Holesky;
         let node_operator_id = U256::from(200);
 
-        let pubkeys = fetch_ssv_pubkeys(chain, node_operator_id).await?;
+        let pubkeys = fetch_ssv_pubkeys(chain, node_operator_id, &None).await?;
 
         assert_eq!(pubkeys.len(), 3);
 
diff --git a/crates/common/src/config/utils.rs b/crates/common/src/config/utils.rs
index 67c367c5..969a4688 100644
--- a/crates/common/src/config/utils.rs
+++ b/crates/common/src/config/utils.rs
@@ -1,10 +1,11 @@
 use std::{collections::HashMap, path::Path};
 
+use bytes::{BufMut, BytesMut};
 use eyre::{bail, Context, Result};
 use serde::de::DeserializeOwned;
 
 use super::JWTS_ENV;
-use crate::types::ModuleId;
+use crate::{config::MUXER_HTTP_MAX_LENGTH, types::ModuleId};
 
 pub fn load_env_var(env: &str) -> Result<String> {
     std::env::var(env).wrap_err(format!("{env} is not set"))
@@ -30,6 +31,40 @@ pub fn load_jwt_secrets() -> Result<HashMap<ModuleId, String>> {
     decode_string_to_map(&jwt_secrets)
 }
 
+/// Reads an HTTP response safely, erroring out if it failed or if the body is
+/// too large.
+pub async fn safe_read_http_response(mut response: reqwest::Response) -> Result<String> {
+    // Break if content length is provided but it's too big
+    if let Some(length) = response.content_length() {
+        if length > MUXER_HTTP_MAX_LENGTH {
+            bail!("Response content length ({length}) exceeds the maximum allowed length ({MUXER_HTTP_MAX_LENGTH} bytes)");
+        }
+    }
+
+    // Make sure the response is a 200
+    if response.status() != reqwest::StatusCode::OK {
+        bail!("Request failed with status: {}", response.status());
+    }
+
+    // Read the response to a buffer in chunks
+    let mut buffer = BytesMut::with_capacity(1024);
+    while let Some(chunk) = response.chunk().await?
{
+        if buffer.len() > MUXER_HTTP_MAX_LENGTH as usize {
+            bail!(
+                "Response body exceeds the maximum allowed length ({MUXER_HTTP_MAX_LENGTH} bytes)"
+            );
+        }
+        buffer.put(chunk);
+    }
+
+    // Convert the buffer to a string
+    let bytes = buffer.freeze();
+    match std::str::from_utf8(&bytes) {
+        Ok(s) => Ok(s.to_string()),
+        Err(e) => bail!("Failed to decode response body as UTF-8: {e}"),
+    }
+}
+
 fn decode_string_to_map(raw: &str) -> Result<HashMap<ModuleId, String>> {
     // trim the string and split for comma
     raw.trim()
From 672aacf35823ac1dc2d265a85cc6a09a71266ec2 Mon Sep 17 00:00:00 2001
From: Joe Clapis
Date: Thu, 5 Jun 2025 13:12:34 -0400
Subject: [PATCH 33/52] Started putting together unit tests for the SSV key loader's HTTP handling

---
 crates/common/src/config/mux.rs | 172 ++++++++++++++++++++++++++++----
 tests/data/ssv_valid.json       |  99 ++++++++++++++++++
 2 files changed, 250 insertions(+), 21 deletions(-)
 create mode 100644 tests/data/ssv_valid.json

diff --git a/crates/common/src/config/mux.rs b/crates/common/src/config/mux.rs
index 4b3bb3c1..f070b293 100644
--- a/crates/common/src/config/mux.rs
+++ b/crates/common/src/config/mux.rs
@@ -312,27 +312,16 @@ async fn fetch_ssv_pubkeys(
         _ => bail!("SSV network is not supported for chain: {chain:?}"),
     };
 
-    let client = reqwest::ClientBuilder::new()
-        .timeout(Duration::from_secs(timeout.unwrap_or(MUXER_HTTP_TIMEOUT_DEFAULT)))
-        .build()?;
     let mut pubkeys: Vec<BlsPublicKey> = vec![];
 
     let mut page = 1;
     loop {
-        let response = client
-            .get(format!(
-                "https://api.ssv.network/api/v4/{}/validators/in_operator/{}?perPage={}&page={}",
-                chain_name, node_operator_id, MAX_PER_PAGE, page
-            ))
-            .send()
-            .await
-            .map_err(|e| eyre::eyre!("Error sending request to SSV network API: {e}"))?;
-
-        // Parse the response as JSON
-        let body_string = safe_read_http_response(response).await?;
-        let response = serde_json::from_slice::<SSVResponse>(body_string.as_bytes())
-            .wrap_err("failed to parse SSV response")?;
+        let url = format!(
+            "https://api.ssv.network/api/v4/{}/validators/in_operator/{}?perPage={}&page={}",
+            chain_name, node_operator_id, MAX_PER_PAGE, page
+        );
+        let response = fetch_ssv_pubkeys_from_url(&url, timeout).await?;
 
         pubkeys.extend(response.validators.iter().map(|v| v.pubkey).collect::<Vec<_>>());
         page += 1;
@@ -353,6 +342,24 @@ async fn fetch_ssv_pubkeys(
     Ok(pubkeys)
 }
 
+async fn fetch_ssv_pubkeys_from_url(url: &str, timeout: &Option<u64>) -> eyre::Result<SSVResponse> {
+    let client = reqwest::ClientBuilder::new()
+        .timeout(Duration::from_secs(timeout.unwrap_or(MUXER_HTTP_TIMEOUT_DEFAULT)))
+        .build()?;
+    let response = client.get(url).send().await.map_err(|e| {
+        if e.is_timeout() {
+            eyre::eyre!("Request to SSV network API timed out: {e}")
+        } else {
+            eyre::eyre!("Error sending request to SSV network API: {e}")
+        }
+    })?;
+
+    // Parse the response as JSON
+    let body_string = safe_read_http_response(response).await?;
+    serde_json::from_slice::<SSVResponse>(body_string.as_bytes())
+        .wrap_err("failed to parse SSV response")
+}
+
 #[derive(Deserialize)]
 struct SSVResponse {
     validators: Vec<SSVValidator>,
@@ -372,10 +379,17 @@ struct SSVPagination {
 
 #[cfg(test)]
 mod tests {
-    use alloy::{primitives::U256, providers::ProviderBuilder};
+    use std::net::SocketAddr;
+
+    use alloy::{hex::FromHex, primitives::U256, providers::ProviderBuilder};
+    use axum::{response::Response, routing::get};
+    use tokio::{net::TcpListener, task::JoinHandle};
     use url::Url;
 
     use super::*;
+    use crate::config::MUXER_HTTP_MAX_LENGTH;
+
+    const TEST_HTTP_TIMEOUT: u64 = 2;
 
     #[tokio::test]
     async fn test_lido_registry_address() -> eyre::Result<()> {
@@ -410,14 +424,130 @@ mod
tests {
     }
 
     #[tokio::test]
+    /// Tests that a successful SSV network fetch is handled and parsed properly
     async fn test_ssv_network_fetch() -> eyre::Result<()> {
-        let chain = Chain::Holesky;
-        let node_operator_id = U256::from(200);
+        // Start the mock server
+        let port = 30100;
+        let _server_handle = create_mock_server(port).await?;
+        let url = format!("http://localhost:{port}/ssv");
+        let response = fetch_ssv_pubkeys_from_url(&url, &None).await?;
+
+        // Make sure the response is correct
+        // NOTE: requires that ssv_valid.json doesn't change
+        assert_eq!(response.validators.len(), 3);
+        let expected_pubkeys = [
+            BlsPublicKey::from_hex(
+                "0x967ba17a3e7f82a25aa5350ec34d6923e28ad8237b5a41efe2c5e325240d74d87a015bf04634f21900963539c8229b2a",
+            )?,
+            BlsPublicKey::from_hex(
+                "0xac769e8cec802e8ffee34de3253be8f438a0c17ee84bdff0b6730280d24b5ecb77ebc9c985281b41ee3bda8663b6658c",
+            )?,
+            BlsPublicKey::from_hex(
+                "0x8c866a5a05f3d45c49b457e29365259021a509c5daa82e124f9701a960ee87b8902e87175315ab638a3d8b1115b23639",
+            )?,
+        ];
+        for (i, validator) in response.validators.iter().enumerate() {
+            assert_eq!(validator.pubkey, expected_pubkeys[i]);
+        }
 
-        let pubkeys = fetch_ssv_pubkeys(chain, node_operator_id, &None).await?;
+        // Clean up the server handle
+        _server_handle.abort();
+        info!("SSV network fetch test passed successfully");
 
-        assert_eq!(pubkeys.len(), 3);
+        Ok(())
+    }
+
+    #[tokio::test]
+    /// Tests that the SSV network fetch is handled properly when the request
+    /// times out
+    async fn test_ssv_network_fetch_timeout() -> eyre::Result<()> {
+        // Start the mock server
+        let port = 30101;
+        let _server_handle = create_mock_server(port).await?;
+        let url = format!("http://localhost:{port}/timeout");
+        let response = fetch_ssv_pubkeys_from_url(&url, &Some(TEST_HTTP_TIMEOUT)).await;
+
+        // The response should fail due to timeout
+        assert!(response.is_err(), "Expected timeout error, but got success");
+        if let Err(e) = response {
+            assert!(e.to_string().contains("timed out"), "Expected timeout error, got: {}", e);
+        }
+
+        // Clean up the server handle
+        _server_handle.abort();
+        info!("SSV network fetch test passed successfully");
 
         Ok(())
     }
+
+    /// Creates a simple mock server to simulate the SSV API endpoint under
+    /// various conditions for testing
+    async fn create_mock_server(port: u16) -> Result<JoinHandle<()>, axum::Error> {
+        let router = axum::Router::new()
+            .route("/ssv", get(handle_ssv))
+            .route("/big_content_length", get(handle_big_content_length))
+            .route("/big_data", get(handle_big_data))
+            .route("/timeout", get(handle_timeout))
+            .into_make_service();
+
+        let address = SocketAddr::from(([127, 0, 0, 1], port));
+        let listener = TcpListener::bind(address).await.map_err(axum::Error::new)?;
+        let server = axum::serve(listener, router).with_graceful_shutdown(async {
+            tokio::signal::ctrl_c().await.expect("Failed to listen for shutdown signal");
+        });
+        let result = Ok(tokio::spawn(async move {
+            if let Err(e) = server.await {
+                eprintln!("Server error: {}", e);
+            }
+        }));
+        info!("Mock server started on http://localhost:{port}/");
+        result
+    }
+
+    /// Sends the good SSV JSON data to the client
+    async fn handle_ssv() -> Response {
+        // Read the JSON data
+        let data = include_str!("../../../../tests/data/ssv_valid.json");
+
+        // Create a valid response
+        Response::builder()
+            .status(200)
+            .header("Content-Type", "application/json")
+            .body(data.into())
+            .unwrap()
+    }
+
+    /// Send an empty response with a large content length
+    async fn handle_big_content_length() -> Response {
+        // Create a response
with the content length set to some really large value + let body = ""; + Response::builder() + .status(200) + .header("Content-Type", "application/json") + .header("Content-Length", 2 * MUXER_HTTP_MAX_LENGTH) + .body(body.into()) + .unwrap() + } + + /// Sends a response with a large body but no content length + async fn handle_big_data() -> Response { + // Create a response with a large body but no content length + let body = "f".repeat(2 * MUXER_HTTP_MAX_LENGTH as usize); + Response::builder() + .status(200) + .header("Content-Type", "application/text") + .body(body.into()) + .unwrap() + } + + /// Simulates a timeout by sleeping for a long time + async fn handle_timeout() -> Response { + // Sleep for a long time to simulate a timeout + tokio::time::sleep(std::time::Duration::from_secs(2 * TEST_HTTP_TIMEOUT)).await; + Response::builder() + .status(200) + .header("Content-Type", "application/text") + .body("Timeout response".into()) + .unwrap() + } } diff --git a/tests/data/ssv_valid.json b/tests/data/ssv_valid.json new file mode 100644 index 00000000..e19b13e6 --- /dev/null +++ b/tests/data/ssv_valid.json @@ -0,0 +1,99 @@ +{ + "validators": [ + { + "id": 554991, + "public_key": "967ba17a3e7f82a25aa5350ec34d6923e28ad8237b5a41efe2c5e325240d74d87a015bf04634f21900963539c8229b2a", + "cluster": "0xf7c1283eb0c0f76b5fa84c7541d8d4d27751b4083a5e8dcb8ac9e72bb7f559b8", + "owner_address": "0xB2EE025B1d129c61E77223bAb42fc65b29B16243", + "status": "Inactive", + "is_valid": true, + "is_deleted": false, + "is_public_key_valid": true, + "is_shares_valid": true, + "is_operators_valid": true, + "operators": [ + 16, + 27, + 86, + 90, + 200, + 204, + 214 + ], + "validator_info": { + "index": 1476217, + "status": "withdrawal_possible", + "activation_epoch": 4950, + "effective_balance": 32000000000 + }, + "version": "v4", + "network": "holesky" + }, + { + "id": 554992, + "public_key": "ac769e8cec802e8ffee34de3253be8f438a0c17ee84bdff0b6730280d24b5ecb77ebc9c985281b41ee3bda8663b6658c", + "cluster": "0xf7c1283eb0c0f76b5fa84c7541d8d4d27751b4083a5e8dcb8ac9e72bb7f559b8", + "owner_address": "0xB2EE025B1d129c61E77223bAb42fc65b29B16243", + "status": "Inactive", + "is_valid": true, + "is_deleted": false, + "is_public_key_valid": true, + "is_shares_valid": true, + "is_operators_valid": true, + "operators": [ + 16, + 27, + 86, + 90, + 200, + 204, + 214 + ], + "validator_info": { + "index": 1476218, + "status": "withdrawal_possible", + "activation_epoch": 4950, + "effective_balance": 32000000000 + }, + "version": "v4", + "network": "holesky" + }, + { + "id": 554994, + "public_key": "8c866a5a05f3d45c49b457e29365259021a509c5daa82e124f9701a960ee87b8902e87175315ab638a3d8b1115b23639", + "cluster": "0xf7c1283eb0c0f76b5fa84c7541d8d4d27751b4083a5e8dcb8ac9e72bb7f559b8", + "owner_address": "0xB2EE025B1d129c61E77223bAb42fc65b29B16243", + "status": "Inactive", + "is_valid": true, + "is_deleted": false, + "is_public_key_valid": true, + "is_shares_valid": true, + "is_operators_valid": true, + "operators": [ + 16, + 27, + 86, + 90, + 200, + 204, + 214 + ], + "validator_info": { + "index": 1476222, + "status": "withdrawal_possible", + "activation_epoch": 4950, + "effective_balance": 32000000000 + }, + "version": "v4", + "network": "holesky" + } + ], + "pagination": { + "total": 3, + "pages": 1, + "per_page": 10, + "page": 1, + "current_first": 554991, + "current_last": 554994 + } +} \ No newline at end of file From d53728821c88045dd9f6f87c37a9ad076647d601 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 9 Jun 2025 13:21:12 -0400 Subject: 
[PATCH 34/52] Update crates/cli/src/docker_init.rs Co-authored-by: ltitanb <163874448+ltitanb@users.noreply.github.com> --- crates/cli/src/docker_init.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 652e3448..c6fcd533 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -73,11 +73,7 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut targets = Vec::new(); // address for signer API communication - let signer_port = if let Some(signer_config) = &cb_config.signer { - signer_config.port - } else { - DEFAULT_SIGNER_PORT - }; + let signer_port = cb_config.signer.as_ref().map(|s| s.port).unwrap_or(DEFAULT_SIGNER_PORT); let signer_server = if let Some(SignerConfig { inner: SignerType::Remote { url }, .. }) = &cb_config.signer { url.to_string() From 7afb7633fb3f75baacb88eb3d1600bd15c2e2cc6 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 9 Jun 2025 13:22:15 -0400 Subject: [PATCH 35/52] Added example signer config params --- config.example.toml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/config.example.toml b/config.example.toml index ae69c3ff..89d472c1 100644 --- a/config.example.toml +++ b/config.example.toml @@ -148,6 +148,13 @@ url = "http://0xa119589bb33ef52acbb8116832bec2b58fca590fe5c85eac5d3230b44d5bc09f # Docker image to use for the Signer module. # OPTIONAL, DEFAULT: ghcr.io/commit-boost/signer:latest # docker_image = "ghcr.io/commit-boost/signer:latest" +# Host to bind the Signer API server to +# OPTIONAL, DEFAULT: 127.0.0.1 +host = "127.0.0.1" +# Port to listen for Signer API calls on +# OPTIONAL, DEFAULT: 20000 +port = 20000 + # For Remote signer: # [signer.remote] # URL of the Web3Signer instance From 09ac8217f686b378dda48a86fd2b78bad9493b92 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 9 Jun 2025 13:22:31 -0400 Subject: [PATCH 36/52] Cleaned up signer config loading from feedback --- crates/common/src/config/signer.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index dce97666..5618f3ae 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -109,20 +109,17 @@ impl StartSignerConfig { let jwts = load_jwt_secrets()?; - // Load the server endpoint first from the env var, then the config, and finally - // the defaults + let signer_config = config.signer.ok_or_eyre("Signer config is missing")?; + + // Load the server endpoint first from the env var if present, otherwise the + // config let endpoint = if let Some(endpoint) = load_optional_env_var(SIGNER_ENDPOINT_ENV) { endpoint.parse()? } else { - match config.signer { - Some(ref signer) => SocketAddr::from((signer.host, signer.port)), - None => SocketAddr::from((default_host(), DEFAULT_SIGNER_PORT)), - } + SocketAddr::from((signer_config.host, signer_config.port)) }; - let signer = config.signer.ok_or_eyre("Signer config is missing")?.inner; - - match signer { + match signer_config.inner { SignerType::Local { loader, store, .. 
} => Ok(StartSignerConfig { chain: config.chain, loader: Some(loader), From cb7c8eb6ccb09c325a9dacc603777cdc652e3375 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 9 Jun 2025 14:10:17 -0400 Subject: [PATCH 37/52] Finished unit tests for the SSV loader --- Cargo.lock | 42 ++++++++++++++ Cargo.toml | 2 + crates/common/Cargo.toml | 2 + crates/common/src/config/constants.rs | 3 + crates/common/src/config/mux.rs | 79 +++++++++++++++++++++------ crates/common/src/config/utils.rs | 15 ++++- 6 files changed, 122 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 533ad103..49f3b759 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1498,9 +1498,11 @@ dependencies = [ "pbkdf2 0.12.2", "rand 0.9.0", "reqwest", + "scopeguard", "serde", "serde_json", "serde_yaml", + "serial_test", "sha2 0.10.8", "ssz_types", "thiserror 2.0.12", @@ -4332,6 +4334,15 @@ dependencies = [ "cipher 0.3.0", ] +[[package]] +name = "scc" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22b2d775fb28f245817589471dd49c5edf64237f4a19d10ce9a92ff4651a27f4" +dependencies = [ + "sdd", +] + [[package]] name = "schannel" version = "0.1.27" @@ -4359,6 +4370,12 @@ dependencies = [ "sha2 0.9.9", ] +[[package]] +name = "sdd" +version = "3.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "584e070911c7017da6cb2eb0788d09f43d789029b5877d3e5ecc8acf86ceee21" + [[package]] name = "sec1" version = "0.7.3" @@ -4563,6 +4580,31 @@ dependencies = [ "serde", ] +[[package]] +name = "serial_test" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +dependencies = [ + "futures", + "log", + "once_cell", + "parking_lot", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "sha1" version = "0.10.6" diff --git a/Cargo.toml b/Cargo.toml index 292be54f..e24abeb3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,9 +53,11 @@ prometheus = "0.13.4" prost = "0.13.4" rand = { version = "0.9", features = ["os_rng"] } reqwest = { version = "0.12.4", features = ["json", "stream"] } +scopeguard = "1.2.0" serde = { version = "1.0.202", features = ["derive"] } serde_json = "1.0.117" serde_yaml = "0.9.33" +serial_test = "3.2.0" sha2 = "0.10.8" ssz_types = "0.10" thiserror = "2.0.12" diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 7561e9ac..c0f15a55 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -42,3 +42,5 @@ tree_hash_derive.workspace = true unicode-normalization.workspace = true url.workspace = true jsonwebtoken.workspace = true +serial_test.workspace = true +scopeguard.workspace = true diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index 5fe97958..07572b18 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -89,3 +89,6 @@ pub const SIGNER_URL_ENV: &str = "CB_SIGNER_URL"; /// Events modules /// Where to receive builder events pub const BUILDER_PORT_ENV: &str = "CB_BUILDER_PORT"; + +///////////////////////// TESTING CONSTANTS ///////////////////////// +pub const CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV: &str = "CB_TEST_HTTP_DISABLE_CONTENT_LENGTH"; diff 
--git a/crates/common/src/config/mux.rs b/crates/common/src/config/mux.rs index 88343139..c7ffce34 100644 --- a/crates/common/src/config/mux.rs +++ b/crates/common/src/config/mux.rs @@ -383,15 +383,17 @@ struct SSVPagination { #[cfg(test)] mod tests { - use std::net::SocketAddr; + use std::{env, net::SocketAddr}; use alloy::{hex::FromHex, primitives::U256, providers::ProviderBuilder}; use axum::{response::Response, routing::get}; + use scopeguard::defer; + use serial_test::serial; use tokio::{net::TcpListener, task::JoinHandle}; use url::Url; use super::*; - use crate::config::MUXER_HTTP_MAX_LENGTH; + use crate::config::{CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV, MUXER_HTTP_MAX_LENGTH}; const TEST_HTTP_TIMEOUT: u64 = 2; @@ -456,7 +458,34 @@ mod tests { // Clean up the server handle _server_handle.abort(); - info!("SSV network fetch test passed successfully"); + + Ok(()) + } + + #[tokio::test] + #[serial] + /// Tests that the SSV network fetch is handled properly when the response's + /// body is too large + async fn test_ssv_network_fetch_big_data() -> eyre::Result<()> { + // Start the mock server + let port = 30101; + env::remove_var(CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV); + let _server_handle = create_mock_server(port).await?; + let url = format!("http://localhost:{port}/big_data"); + let response = fetch_ssv_pubkeys_from_url(&url, &Some(120)).await; + + // The response should fail due to content length being too big + assert!(response.is_err(), "Expected error due to big content length, but got success"); + if let Err(e) = response { + assert!( + e.to_string().contains("content length") && + e.to_string().contains("exceeds the maximum allowed length"), + "Expected content length error, got: {e}", + ); + } + + // Clean up the server handle + _server_handle.abort(); Ok(()) } @@ -466,7 +495,7 @@ mod tests { /// times out async fn test_ssv_network_fetch_timeout() -> eyre::Result<()> { // Start the mock server - let port = 30101; + let port = 30102; let _server_handle = create_mock_server(port).await?; let url = format!("http://localhost:{port}/timeout"); let response = fetch_ssv_pubkeys_from_url(&url, &Some(TEST_HTTP_TIMEOUT)).await; @@ -479,7 +508,34 @@ mod tests { // Clean up the server handle _server_handle.abort(); - info!("SSV network fetch test passed successfully"); + + Ok(()) + } + + #[tokio::test] + #[serial] + /// Tests that the SSV network fetch is handled properly when the response's + /// content-length header is missing + async fn test_ssv_network_fetch_big_data_without_content_length() -> eyre::Result<()> { + // Start the mock server + let port = 30103; + env::set_var(CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV, "1"); + defer! 
{ env::remove_var(CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV); }
+        let _server_handle = create_mock_server(port).await?;
+        let url = format!("http://localhost:{port}/big_data");
+        let response = fetch_ssv_pubkeys_from_url(&url, &Some(120)).await;
+
+        // The response should fail due to the body size
+        assert!(response.is_err(), "Expected error due to body size, but got success");
+        if let Err(e) = response {
+            assert!(
+                e.to_string().contains("Response body exceeds the maximum allowed length "),
+                "Expected content length error, got: {e}",
+            );
+        }
+
+        // Clean up the server handle
+        _server_handle.abort();
 
         Ok(())
     }
@@ -489,7 +545,6 @@
     async fn create_mock_server(port: u16) -> Result<JoinHandle<()>, axum::Error> {
         let router = axum::Router::new()
             .route("/ssv", get(handle_ssv))
-            .route("/big_content_length", get(handle_big_content_length))
             .route("/big_data", get(handle_big_data))
             .route("/timeout", get(handle_timeout))
             .into_make_service();
@@ -521,18 +576,6 @@
     }
 
-    /// Send an empty response with a large content length
-    async fn handle_big_content_length() -> Response {
-        // Create a response with the content length set to some really large value
-        let body = "";
-        Response::builder()
-            .status(200)
-            .header("Content-Type", "application/json")
-            .header("Content-Length", 2 * MUXER_HTTP_MAX_LENGTH)
-            .body(body.into())
-            .unwrap()
-    }
-
     /// Sends a response with a large body but no content length
     async fn handle_big_data() -> Response {
         // Create a response with a large body but no content length
diff --git a/crates/common/src/config/utils.rs b/crates/common/src/config/utils.rs
index 969a4688..dd391e17 100644
--- a/crates/common/src/config/utils.rs
+++ b/crates/common/src/config/utils.rs
@@ -1,11 +1,14 @@
-use std::{collections::HashMap, path::Path};
+use std::{collections::HashMap, env, path::Path};
 
 use bytes::{BufMut, BytesMut};
 use eyre::{bail, Context, Result};
 use serde::de::DeserializeOwned;
 
 use super::JWTS_ENV;
-use crate::{config::MUXER_HTTP_MAX_LENGTH, types::ModuleId};
+use crate::{
+    config::{CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV, MUXER_HTTP_MAX_LENGTH},
+    types::ModuleId,
+};
 
 pub fn load_env_var(env: &str) -> Result<String> {
     std::env::var(env).wrap_err(format!("{env} is not set"))
@@ -34,8 +37,14 @@ pub fn load_jwt_secrets() -> Result<HashMap<ModuleId, String>> {
 
 /// Reads an HTTP response safely, erroring out if it failed or if the body is
 /// too large.
pub async fn safe_read_http_response(mut response: reqwest::Response) -> Result<String> {
+    // Get the content length from the response headers
+    let mut content_length = response.content_length();
+    if env::var(CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV).is_ok() {
+        content_length = None;
+    }
+
     // Break if content length is provided but it's too big
-    if let Some(length) = response.content_length() {
+    if let Some(length) = content_length {
         if length > MUXER_HTTP_MAX_LENGTH {
             bail!("Response content length ({length}) exceeds the maximum allowed length ({MUXER_HTTP_MAX_LENGTH} bytes)");
         }
From adc4389818039be3889471e8b81dc579b35f00dd Mon Sep 17 00:00:00 2001
From: Joe Clapis
Date: Tue, 10 Jun 2025 02:58:21 -0400
Subject: [PATCH 38/52] Ported HTTP timeout to the PBS event publisher

---
 crates/common/src/config/constants.rs |  5 +--
 crates/common/src/config/mux.rs       | 47 +++++++++++++++------------
 crates/common/src/config/pbs.rs       |  5 ++-
 crates/common/src/pbs/event.rs        | 32 +++++++++++++-----
 4 files changed, 58 insertions(+), 31 deletions(-)

diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs
index 4d7c6bb2..309eb15e 100644
--- a/crates/common/src/config/constants.rs
+++ b/crates/common/src/config/constants.rs
@@ -74,8 +74,9 @@ pub const PROXY_DIR_SECRETS_DEFAULT: &str = "/proxy_secrets";
 
 ////////////////////////// MUXER //////////////////////////
 
-/// Timeout for Muxer HTTP requests, in seconds
-pub const MUXER_HTTP_TIMEOUT_DEFAULT: u64 = 10;
+/// Timeout for HTTP requests, in seconds
+pub const HTTP_TIMEOUT_SECONDS_ENV: &str = "CB_HTTP_TIMEOUT_SECONDS";
+pub const HTTP_TIMEOUT_SECONDS_DEFAULT: u64 = 10;
 
 /// Max content length for Muxer HTTP responses, in bytes
 pub const MUXER_HTTP_MAX_LENGTH: u64 = 1024 * 1024 * 10; // 10 MiB
diff --git a/crates/common/src/config/mux.rs b/crates/common/src/config/mux.rs
index c7ffce34..15887e9b 100644
--- a/crates/common/src/config/mux.rs
+++ b/crates/common/src/config/mux.rs
@@ -18,7 +18,7 @@ use tracing::{debug, info};
 use url::Url;
 
 use super::{load_optional_env_var, PbsConfig, RelayConfig, MUX_PATH_ENV};
 use crate::{
-    config::{safe_read_http_response, MUXER_HTTP_TIMEOUT_DEFAULT},
+    config::{safe_read_http_response, HTTP_TIMEOUT_SECONDS_DEFAULT, HTTP_TIMEOUT_SECONDS_ENV},
     pbs::RelayClient,
     types::Chain,
 };
@@ -43,13 +43,19 @@ impl PbsMuxes {
         chain: Chain,
         default_pbs: &PbsConfig,
     ) -> eyre::Result> {
+        let http_timeout = match load_optional_env_var(HTTP_TIMEOUT_SECONDS_ENV) {
+            Some(timeout_str) => Duration::from_secs(timeout_str.parse::<u64>()?),
+            None => Duration::from_secs(default_pbs.http_timeout_seconds),
+        };
+
         let mut muxes = self.muxes;
 
         for mux in muxes.iter_mut() {
             ensure!(!mux.relays.is_empty(), "mux config {} must have at least one relay", mux.id);
 
             if let Some(loader) = &mux.loader {
-                let extra_keys = loader.load(&mux.id, chain, default_pbs.rpc_url.clone()).await?;
+                let extra_keys =
+                    loader.load(&mux.id, chain, default_pbs.rpc_url.clone(), http_timeout).await?;
                 mux.validator_pubkeys.extend(extra_keys);
             }
 
@@ -147,12 +153,10 @@ pub enum MuxKeysLoader {
     File(PathBuf),
     HTTP {
         url: String,
-        timeout: Option<u64>,
     },
     Registry {
         registry: NORegistry,
         node_operator_id: u64,
-        timeout: Option<u64>,
     },
 }
 
@@ -170,6 +174,7 @@ impl MuxKeysLoader {
         mux_id: &str,
         chain: Chain,
         rpc_url: Option<Url>,
+        http_timeout: Duration,
     ) -> eyre::Result<Vec<BlsPublicKey>> {
         match self {
             Self::File(config_path) => {
@@ -181,21 +186,19 @@ impl MuxKeysLoader {
                 serde_json::from_str(&file).wrap_err("failed to parse mux keys file")
             }
 
-            Self::HTTP { url, timeout } => {
-                let url =
Url::parse(url).wrap_err("failed to parse mux keys URL")?;
                 if url.scheme() != "https" {
                     bail!("mux keys URL must use HTTPS");
                 }
-                let client = reqwest::ClientBuilder::new()
-                    .timeout(Duration::from_secs(timeout.unwrap_or(MUXER_HTTP_TIMEOUT_DEFAULT)))
-                    .build()?;
+                let client = reqwest::ClientBuilder::new().timeout(http_timeout).build()?;
                 let response = client.get(url).send().await?;
                 let pubkeys = safe_read_http_response(response).await?;
                 serde_json::from_str(&pubkeys)
                     .wrap_err("failed to fetch mux keys from HTTP endpoint")
             }
 
-            Self::Registry { registry, node_operator_id, timeout } => match registry {
+            Self::Registry { registry, node_operator_id } => match registry {
                 NORegistry::Lido => {
                     let Some(rpc_url) = rpc_url else {
                         bail!("Lido registry requires RPC URL to be set in the PBS config");
                     };
@@ -204,7 +207,7 @@ impl MuxKeysLoader {
                     fetch_lido_registry_keys(rpc_url, chain, U256::from(*node_operator_id)).await
                 }
                 NORegistry::SSV => {
-                    fetch_ssv_pubkeys(chain, U256::from(*node_operator_id), timeout).await
+                    fetch_ssv_pubkeys(chain, U256::from(*node_operator_id), http_timeout).await
                 }
             },
         }
@@ -305,7 +308,7 @@ async fn fetch_lido_registry_keys(
 async fn fetch_ssv_pubkeys(
     chain: Chain,
     node_operator_id: U256,
-    timeout: &Option<u64>,
+    http_timeout: Duration,
 ) -> eyre::Result<Vec<BlsPublicKey>> {
     const MAX_PER_PAGE: usize = 100;
 
@@ -325,7 +328,7 @@ async fn fetch_ssv_pubkeys(
             chain_name, node_operator_id, MAX_PER_PAGE, page
         );
 
-        let response = fetch_ssv_pubkeys_from_url(&url, timeout).await?;
+        let response = fetch_ssv_pubkeys_from_url(&url, http_timeout).await?;
         pubkeys.extend(response.validators.iter().map(|v| v.pubkey).collect::<Vec<_>>());
 
         page += 1;
@@ -346,10 +349,11 @@ async fn fetch_ssv_pubkeys(
     Ok(pubkeys)
 }
 
-async fn fetch_ssv_pubkeys_from_url(url: &str, timeout: &Option<u64>) -> eyre::Result<SSVResponse> {
-    let client = reqwest::ClientBuilder::new()
-        .timeout(Duration::from_secs(timeout.unwrap_or(MUXER_HTTP_TIMEOUT_DEFAULT)))
-        .build()?;
+async fn fetch_ssv_pubkeys_from_url(
+    url: &str,
+    http_timeout: Duration,
+) -> eyre::Result<SSVResponse> {
+    let client = reqwest::ClientBuilder::new().timeout(http_timeout).build()?;
     let response = client.get(url).send().await.map_err(|e| {
         if e.is_timeout() {
             eyre::eyre!("Request to SSV network API timed out: {e}")
@@ -436,7 +440,9 @@ mod tests {
         let port = 30100;
         let _server_handle = create_mock_server(port).await?;
         let url = format!("http://localhost:{port}/ssv");
-        let response = fetch_ssv_pubkeys_from_url(&url, &None).await?;
+        let response =
+            fetch_ssv_pubkeys_from_url(&url, Duration::from_secs(HTTP_TIMEOUT_SECONDS_DEFAULT))
+                .await?;
 
         // Make sure the response is correct
         // NOTE: requires that ssv_valid.json doesn't change
         assert_eq!(response.validators.len(), 3);
@@ -472,7 +478,7 @@ mod tests {
         env::remove_var(CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV);
         let _server_handle = create_mock_server(port).await?;
         let url = format!("http://localhost:{port}/big_data");
-        let response = fetch_ssv_pubkeys_from_url(&url, &Some(120)).await;
+        let response = fetch_ssv_pubkeys_from_url(&url, Duration::from_secs(120)).await;
@@ -498,7 +504,8 @@ mod tests {
         let port = 30102;
         let _server_handle = create_mock_server(port).await?;
         let url = format!("http://localhost:{port}/timeout");
-        let response = fetch_ssv_pubkeys_from_url(&url, &Some(TEST_HTTP_TIMEOUT)).await;
+        let response =
+            fetch_ssv_pubkeys_from_url(&url, Duration::from_secs(TEST_HTTP_TIMEOUT)).await;
 
         // The response should fail due to timeout
assert!(response.is_err(), "Expected timeout error, but got success");
@@ -523,7 +530,7 @@ mod tests {
         defer! { env::remove_var(CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV); }
         let _server_handle = create_mock_server(port).await?;
         let url = format!("http://localhost:{port}/big_data");
-        let response = fetch_ssv_pubkeys_from_url(&url, &Some(120)).await;
+        let response = fetch_ssv_pubkeys_from_url(&url, Duration::from_secs(120)).await;
 
         // The response should fail due to the body size
         assert!(response.is_err(), "Expected error due to body size, but got success");
diff --git a/crates/common/src/config/pbs.rs b/crates/common/src/config/pbs.rs
index 6c993716..363e9d99 100644
--- a/crates/common/src/config/pbs.rs
+++ b/crates/common/src/config/pbs.rs
@@ -17,7 +17,7 @@ use url::Url;
 
 use super::{
     constants::PBS_IMAGE_DEFAULT, load_optional_env_var, CommitBoostConfig, RuntimeMuxConfig,
-    PBS_ENDPOINT_ENV,
+    HTTP_TIMEOUT_SECONDS_DEFAULT, PBS_ENDPOINT_ENV,
 };
 use crate::{
     commit::client::SignerClient,
@@ -122,6 +122,9 @@ pub struct PbsConfig {
     pub extra_validation_enabled: bool,
     /// Execution Layer RPC url to use for extra validation
     pub rpc_url: Option<Url>,
+    /// Timeout for HTTP requests in seconds
+    #[serde(default = "default_u64::<HTTP_TIMEOUT_SECONDS_DEFAULT>")]
+    pub http_timeout_seconds: u64,
 }
 
 impl PbsConfig {
diff --git a/crates/common/src/pbs/event.rs b/crates/common/src/pbs/event.rs
index 015de714..266fb68c 100644
--- a/crates/common/src/pbs/event.rs
+++ b/crates/common/src/pbs/event.rs
@@ -1,4 +1,4 @@
-use std::net::SocketAddr;
+use std::{net::SocketAddr, time::Duration};
 
 use alloy::{primitives::B256, rpc::types::beacon::relay::ValidatorRegistration};
 use async_trait::async_trait;
@@ -8,7 +8,7 @@ use axum::{
     routing::post,
     Json,
 };
-use eyre::bail;
+use eyre::{bail, Result};
 use reqwest::StatusCode;
 use serde::{Deserialize, Serialize};
 use tokio::net::TcpListener;
@@ -19,7 +19,10 @@ use super::{
     GetHeaderParams, GetHeaderResponse, SignedBlindedBeaconBlock, SubmitBlindedBlockResponse,
 };
 use crate::{
-    config::{load_optional_env_var, BUILDER_URLS_ENV},
+    config::{
+        load_optional_env_var, BUILDER_URLS_ENV, HTTP_TIMEOUT_SECONDS_DEFAULT,
+        HTTP_TIMEOUT_SECONDS_ENV,
+    },
     pbs::BUILDER_EVENTS_PATH,
 };
 
@@ -48,11 +51,24 @@ pub struct BuilderEventPublisher {
 }
 
 impl BuilderEventPublisher {
-    pub fn new(endpoints: Vec<Url>) -> Self {
-        Self { client: reqwest::Client::new(), endpoints }
+    pub fn new(endpoints: Vec<Url>, http_timeout: Duration) -> Result<Self> {
+        for endpoint in &endpoints {
+            if endpoint.scheme() != "https" {
+                bail!("BuilderEventPublisher endpoints must use HTTPS (endpoint {endpoint} is invalid)");
+            }
+        }
+        Ok(Self {
+            client: reqwest::ClientBuilder::new().timeout(http_timeout).build().unwrap(),
+            endpoints,
+        })
    }
 
-    pub fn new_from_env() -> eyre::Result<Option<Self>> {
+    pub fn new_from_env() -> Result<Option<Self>> {
+        let http_timeout = match load_optional_env_var(HTTP_TIMEOUT_SECONDS_ENV) {
+            Some(timeout_str) => Duration::from_secs(timeout_str.parse::<u64>()?),
+            None => Duration::from_secs(HTTP_TIMEOUT_SECONDS_DEFAULT),
+        };
+
         load_optional_env_var(BUILDER_URLS_ENV)
             .map(|joined| {
                 let endpoints = joined
@@ -62,9 +78,9 @@ impl BuilderEventPublisher {
                         let url = base.trim().parse::<Url>()?.join(BUILDER_EVENTS_PATH)?;
                         Ok(url)
                     })
-                    .collect::<eyre::Result<Vec<_>>>()?;
+                    .collect::<Result<Vec<_>>>()?;
 
-                Ok(Self::new(endpoints))
+                Self::new(endpoints, http_timeout)
             })
             .transpose()
    }
From 37d299a6e192684a6f69470751b8dc202098c3dd Mon Sep 17 00:00:00 2001
From: Joe Clapis
Date: Tue, 10 Jun 2025 03:00:57 -0400
Subject: [PATCH 39/52] Added the http timeout to the example config

---
 config.example.toml | 3 +++
 1
file changed, 3 insertions(+) diff --git a/config.example.toml b/config.example.toml index d32dfbf9..f95bd255 100644 --- a/config.example.toml +++ b/config.example.toml @@ -55,6 +55,9 @@ extra_validation_enabled = false # Execution Layer RPC url to use for extra validation # OPTIONAL rpc_url = "https://ethereum-holesky-rpc.publicnode.com" +# Timeout for any HTTP requests sent from the PBS module to other services, in seconds +# OPTIONAL, DEFAULT: 10 +http_timeout_seconds = 10 # The PBS module needs one or more [[relays]] as defined below. [[relays]] From 5df487a5906be19ce45a48f14c63f4ad8edf4e2b Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 10 Jun 2025 03:05:57 -0400 Subject: [PATCH 40/52] Fixed a test --- crates/common/src/config/mux.rs | 7 +++++-- tests/src/utils.rs | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/crates/common/src/config/mux.rs b/crates/common/src/config/mux.rs index 15887e9b..e8a7851b 100644 --- a/crates/common/src/config/mux.rs +++ b/crates/common/src/config/mux.rs @@ -18,7 +18,7 @@ use url::Url; use super::{load_optional_env_var, PbsConfig, RelayConfig, MUX_PATH_ENV}; use crate::{ - config::{safe_read_http_response, HTTP_TIMEOUT_SECONDS_DEFAULT, HTTP_TIMEOUT_SECONDS_ENV}, + config::{safe_read_http_response, HTTP_TIMEOUT_SECONDS_ENV}, pbs::RelayClient, types::Chain, }; @@ -397,7 +397,10 @@ mod tests { use url::Url; use super::*; - use crate::config::{CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV, MUXER_HTTP_MAX_LENGTH}; + use crate::config::{ + CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV, HTTP_TIMEOUT_SECONDS_DEFAULT, + MUXER_HTTP_MAX_LENGTH, + }; const TEST_HTTP_TIMEOUT: u64 = 2; diff --git a/tests/src/utils.rs b/tests/src/utils.rs index e8561931..b412efe8 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -81,6 +81,7 @@ pub fn get_pbs_static_config(port: u16) -> PbsConfig { late_in_slot_time_ms: u64::MAX, extra_validation_enabled: false, rpc_url: None, + http_timeout_seconds: 10, } } From ccaf97dc48b94583cd90a20a4ac14ef3bf204d33 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 10 Jun 2025 14:30:13 -0400 Subject: [PATCH 41/52] Added JWT auth fields to the example config --- config.example.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/config.example.toml b/config.example.toml index d32dfbf9..899c6a10 100644 --- a/config.example.toml +++ b/config.example.toml @@ -154,6 +154,12 @@ host = "127.0.0.1" # Port to listen for Signer API calls on # OPTIONAL, DEFAULT: 20000 port = 20000 +# Number of JWT authentication attempts a client can fail before blocking that client temporarily from Signer access +# OPTIONAL, DEFAULT: 3 +jwt_auth_fail_limit: 3 +# How long to block a client from Signer access, in seconds, if it failed JWT authentication too many times +# OPTIONAL, DEFAULT: 300 +jwt_auth_fail_timeout_seconds: 300 # For Remote signer: # [signer.remote] From cc13a6fa2ee95e4f0e9f1fbefee7afb037c9ac34 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 17 Jun 2025 23:15:00 -0400 Subject: [PATCH 42/52] Fixed some example config parameters --- config.example.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config.example.toml b/config.example.toml index 899c6a10..176b6b31 100644 --- a/config.example.toml +++ b/config.example.toml @@ -156,10 +156,10 @@ host = "127.0.0.1" port = 20000 # Number of JWT authentication attempts a client can fail before blocking that client temporarily from Signer access # OPTIONAL, DEFAULT: 3 -jwt_auth_fail_limit: 3 +jwt_auth_fail_limit = 3 # How long to block a client from Signer 
access, in seconds, if it failed JWT authentication too many times # OPTIONAL, DEFAULT: 300 -jwt_auth_fail_timeout_seconds: 300 +jwt_auth_fail_timeout_seconds = 300 # For Remote signer: # [signer.remote] From b9514df39929fd283ee5306ae29f3e5beff6ca9e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 7 Jul 2025 15:51:01 -0400 Subject: [PATCH 43/52] Update crates/signer/src/service.rs Co-authored-by: ltitanb <163874448+ltitanb@users.noreply.github.com> --- crates/signer/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 3ca1d5ac..6f2007e6 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -193,7 +193,7 @@ async fn check_jwt_rate_limit( } // Rate limit the request - let remaining = state.jwt_auth_fail_timeout - elapsed; + let remaining = state.jwt_auth_fail_timeout.saturating_sub(elapsed); warn!("Client {client_ip} is rate limited for {remaining:?} more seconds due to JWT auth failures"); return Err(SignerModuleError::RateLimited(remaining.as_secs_f64())); } From 05fd25a984d1ae20a90456d0def3c2d0d970b8ac Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 7 Jul 2025 16:08:09 -0400 Subject: [PATCH 44/52] Swapping to non-async RwLock based on feedback --- Cargo.lock | 1 + crates/signer/Cargo.toml | 1 + crates/signer/src/service.rs | 24 +++++++++++------------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 703d4687..5182a760 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1571,6 +1571,7 @@ dependencies = [ "headers", "jsonwebtoken", "lazy_static", + "parking_lot", "prometheus", "prost", "rand 0.9.0", diff --git a/crates/signer/Cargo.toml b/crates/signer/Cargo.toml index 4e38da88..569797ac 100644 --- a/crates/signer/Cargo.toml +++ b/crates/signer/Cargo.toml @@ -18,6 +18,7 @@ futures.workspace = true headers.workspace = true jsonwebtoken.workspace = true lazy_static.workspace = true +parking_lot.workspace = true prometheus.workspace = true prost.workspace = true rand.workspace = true diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 3ca1d5ac..77dacf2f 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -1,6 +1,6 @@ use std::{ collections::HashMap, - net::SocketAddr, + net::{IpAddr, SocketAddr}, sync::Arc, time::{Duration, Instant}, }; @@ -33,6 +33,7 @@ use cb_common::{ use cb_metrics::provider::MetricsProvider; use eyre::Context; use headers::{authorization::Bearer, Authorization}; +use parking_lot::RwLock as ParkingRwLock; use tokio::{net::TcpListener, sync::RwLock}; use tracing::{debug, error, info, warn}; use uuid::Uuid; @@ -65,7 +66,7 @@ struct SigningState { jwts: Arc>, /// Map of JWT failures per peer - jwt_auth_failures: Arc>>, + jwt_auth_failures: Arc>>, // JWT auth failure settings jwt_auth_fail_limit: u32, @@ -84,7 +85,7 @@ impl SigningService { let state = SigningState { manager: Arc::new(RwLock::new(start_manager(config.clone()).await?)), jwts: config.jwts.into(), - jwt_auth_failures: Arc::new(RwLock::new(HashMap::new())), + jwt_auth_failures: Arc::new(ParkingRwLock::new(HashMap::new())), jwt_auth_fail_limit: config.jwt_auth_fail_limit, jwt_auth_fail_timeout: Duration::from_secs(config.jwt_auth_fail_timeout_seconds as u64), }; @@ -142,17 +143,17 @@ async fn jwt_auth( next: Next, ) -> Result { // Check if the request needs to be rate limited - let client_ip = addr.ip().to_string(); - check_jwt_rate_limit(&state, &client_ip).await?; + let client_ip = addr.ip(); + 
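// [Editor's sketch, not part of this patch] A self-contained model of the
// rate-limit check invoked just below, assuming the JwtAuthFailureInfo shape
// used in this series (failure_count + last_failure). The free function and
// its name are hypothetical; the real logic lives in check_jwt_rate_limit.
use std::{
    collections::HashMap,
    net::IpAddr,
    time::{Duration, Instant},
};

struct JwtAuthFailureInfo {
    failure_count: u32,
    last_failure: Instant,
}

/// Returns the remaining lockout if `client_ip` is currently blocked.
fn is_rate_limited(
    failures: &mut HashMap<IpAddr, JwtAuthFailureInfo>,
    client_ip: &IpAddr,
    fail_limit: u32,
    fail_timeout: Duration,
) -> Option<Duration> {
    let Some(info) = failures.get(client_ip) else { return None };
    let elapsed = info.last_failure.elapsed();
    let over_limit = info.failure_count >= fail_limit;

    if elapsed >= fail_timeout {
        // The lockout expired, so the client's counter starts fresh
        failures.remove(client_ip);
        return None;
    }
    if over_limit {
        // Still inside the lockout window; saturating_sub mirrors the fix in
        // PATCH 43 and avoids a panic if `elapsed` races past the timeout
        return Some(fail_timeout.saturating_sub(elapsed));
    }
    None
}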
check_jwt_rate_limit(&state, &client_ip)?; // Process JWT authorization - match check_jwt_auth(&auth, &state).await { + match check_jwt_auth(&auth, &state) { Ok(module_id) => { req.extensions_mut().insert(module_id); Ok(next.run(req).await) } Err(SignerModuleError::Unauthorized) => { - let mut failures = state.jwt_auth_failures.write().await; + let mut failures = state.jwt_auth_failures.write(); let failure_info = failures .entry(client_ip) .or_insert(JwtAuthFailureInfo { failure_count: 0, last_failure: Instant::now() }); @@ -166,11 +167,8 @@ async fn jwt_auth( /// Checks if the incoming request needs to be rate limited due to previous JWT /// authentication failures -async fn check_jwt_rate_limit( - state: &SigningState, - client_ip: &String, -) -> Result<(), SignerModuleError> { - let mut failures = state.jwt_auth_failures.write().await; +fn check_jwt_rate_limit(state: &SigningState, client_ip: &IpAddr) -> Result<(), SignerModuleError> { + let mut failures = state.jwt_auth_failures.write(); // Ignore clients that don't have any failures if let Some(failure_info) = failures.get(client_ip) { @@ -203,7 +201,7 @@ async fn check_jwt_rate_limit( } /// Checks if a request can successfully authenticate with the JWT secret -async fn check_jwt_auth( +fn check_jwt_auth( auth: &Authorization, state: &SigningState, ) -> Result { From cc7e87b4d05766cd65cae0574dcda58333c23d6e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 8 Jul 2025 16:24:28 -0400 Subject: [PATCH 45/52] Removed HTTP_TIMEOUT_SECONDS_ENV --- crates/common/src/config/constants.rs | 1 - crates/common/src/config/mux.rs | 7 ++----- crates/common/src/pbs/event.rs | 10 ++-------- 3 files changed, 4 insertions(+), 14 deletions(-) diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index 309eb15e..04fe7146 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -75,7 +75,6 @@ pub const PROXY_DIR_SECRETS_DEFAULT: &str = "/proxy_secrets"; ////////////////////////// MUXER ////////////////////////// /// Timeout for HTTP requests, in seconds -pub const HTTP_TIMEOUT_SECONDS_ENV: &str = "CB_HTTP_TIMEOUT_SECONDS"; pub const HTTP_TIMEOUT_SECONDS_DEFAULT: u64 = 10; /// Max content length for Muxer HTTP responses, in bytes diff --git a/crates/common/src/config/mux.rs b/crates/common/src/config/mux.rs index 67fb3cf8..205ee0f4 100644 --- a/crates/common/src/config/mux.rs +++ b/crates/common/src/config/mux.rs @@ -18,7 +18,7 @@ use url::Url; use super::{load_optional_env_var, PbsConfig, RelayConfig, MUX_PATH_ENV}; use crate::{ - config::{remove_duplicate_keys, safe_read_http_response, HTTP_TIMEOUT_SECONDS_ENV}, + config::{remove_duplicate_keys, safe_read_http_response}, pbs::RelayClient, types::Chain, }; @@ -43,10 +43,7 @@ impl PbsMuxes { chain: Chain, default_pbs: &PbsConfig, ) -> eyre::Result> { - let http_timeout = match load_optional_env_var(HTTP_TIMEOUT_SECONDS_ENV) { - Some(timeout_str) => Duration::from_secs(timeout_str.parse::()?), - None => Duration::from_secs(default_pbs.http_timeout_seconds), - }; + let http_timeout = Duration::from_secs(default_pbs.http_timeout_seconds); let mut muxes = self.muxes; diff --git a/crates/common/src/pbs/event.rs b/crates/common/src/pbs/event.rs index 266fb68c..9bf61f01 100644 --- a/crates/common/src/pbs/event.rs +++ b/crates/common/src/pbs/event.rs @@ -19,10 +19,7 @@ use super::{ GetHeaderParams, GetHeaderResponse, SignedBlindedBeaconBlock, SubmitBlindedBlockResponse, }; use crate::{ - config::{ - load_optional_env_var, 
BUILDER_URLS_ENV, HTTP_TIMEOUT_SECONDS_DEFAULT, - HTTP_TIMEOUT_SECONDS_ENV, - }, + config::{load_optional_env_var, BUILDER_URLS_ENV, HTTP_TIMEOUT_SECONDS_DEFAULT}, pbs::BUILDER_EVENTS_PATH, }; @@ -64,10 +61,7 @@ impl BuilderEventPublisher { } pub fn new_from_env() -> Result> { - let http_timeout = match load_optional_env_var(HTTP_TIMEOUT_SECONDS_ENV) { - Some(timeout_str) => Duration::from_secs(timeout_str.parse::()?), - None => Duration::from_secs(HTTP_TIMEOUT_SECONDS_DEFAULT), - }; + let http_timeout = Duration::from_secs(HTTP_TIMEOUT_SECONDS_DEFAULT); load_optional_env_var(BUILDER_URLS_ENV) .map(|joined| { From 0d1e395f7ab71947c4bad7a371b592bd05c1ed0a Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 8 Jul 2025 16:34:51 -0400 Subject: [PATCH 46/52] Removed HTTPS restriction for the mux loader --- crates/common/src/config/mux.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/common/src/config/mux.rs b/crates/common/src/config/mux.rs index 205ee0f4..39e78477 100644 --- a/crates/common/src/config/mux.rs +++ b/crates/common/src/config/mux.rs @@ -13,7 +13,7 @@ use alloy::{ }; use eyre::{bail, ensure, Context}; use serde::{Deserialize, Serialize}; -use tracing::{debug, info}; +use tracing::{debug, info, warn}; use url::Url; use super::{load_optional_env_var, PbsConfig, RelayConfig, MUX_PATH_ENV}; @@ -186,7 +186,9 @@ impl MuxKeysLoader { Self::HTTP { url } => { let url = Url::parse(url).wrap_err("failed to parse mux keys URL")?; if url.scheme() != "https" { - bail!("mux keys URL must use HTTPS"); + warn!( + "Mux keys URL {url} is insecure; consider using HTTPS if possible instead" + ); } let client = reqwest::ClientBuilder::new().timeout(http_timeout).build()?; let response = client.get(url).send().await?; From fb7cc102ab583eb3799227beda5a18fee34fbf58 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 8 Jul 2025 16:46:33 -0400 Subject: [PATCH 47/52] Moved scopeguard to dev-dependencies for cb-common --- crates/common/Cargo.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 2bd2640f..0ab176ad 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -28,7 +28,6 @@ pbkdf2.workspace = true rand.workspace = true rayon.workspace = true reqwest.workspace = true -scopeguard.workspace = true serde.workspace = true serde_json.workspace = true serde_yaml.workspace = true @@ -46,3 +45,6 @@ tree_hash.workspace = true tree_hash_derive.workspace = true unicode-normalization.workspace = true url.workspace = true + +[dev-dependencies] +scopeguard.workspace = true From 9709e2ad00b6e66300943b0cf842e53044698aa8 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 8 Jul 2025 17:04:46 -0400 Subject: [PATCH 48/52] Moved CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV --- crates/common/src/config/constants.rs | 3 --- crates/common/src/config/utils.rs | 7 +++---- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index 04fe7146..4ea28223 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -94,6 +94,3 @@ pub const SIGNER_URL_ENV: &str = "CB_SIGNER_URL"; /// Events modules /// Where to receive builder events pub const BUILDER_PORT_ENV: &str = "CB_BUILDER_PORT"; - -///////////////////////// TESTING CONSTANTS ///////////////////////// -pub const CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV: &str = "CB_TEST_HTTP_DISABLE_CONTENT_LENGTH"; diff --git 
a/crates/common/src/config/utils.rs b/crates/common/src/config/utils.rs index 0d466655..83e1a71a 100644 --- a/crates/common/src/config/utils.rs +++ b/crates/common/src/config/utils.rs @@ -6,10 +6,9 @@ use eyre::{bail, Context, Result}; use serde::de::DeserializeOwned; use super::JWTS_ENV; -use crate::{ - config::{CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV, MUXER_HTTP_MAX_LENGTH}, - types::ModuleId, -}; +use crate::{config::MUXER_HTTP_MAX_LENGTH, types::ModuleId}; + +pub const CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV: &str = "CB_TEST_HTTP_DISABLE_CONTENT_LENGTH"; pub fn load_env_var(env: &str) -> Result { std::env::var(env).wrap_err(format!("{env} is not set")) From 995919624269ba92de20a87cbb507ee761964049 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 9 Jul 2025 01:26:07 -0400 Subject: [PATCH 49/52] Moved read_chunked_body_with_max to cb-common --- Cargo.lock | 1 + crates/common/Cargo.toml | 1 + crates/common/src/config/utils.rs | 27 ++++++++++--------- crates/common/src/utils.rs | 44 ++++++++++++++++++++++++++++++- crates/pbs/src/utils.rs | 28 +++++++------------- 5 files changed, 69 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3e5a38c8..05dbc3f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1495,6 +1495,7 @@ dependencies = [ "ethereum_ssz 0.8.3", "ethereum_ssz_derive", "eyre", + "futures", "jsonwebtoken", "pbkdf2 0.12.2", "rand 0.9.0", diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 0ab176ad..95f6bbc1 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -23,6 +23,7 @@ ethereum_serde_utils.workspace = true ethereum_ssz.workspace = true ethereum_ssz_derive.workspace = true eyre.workspace = true +futures.workspace = true jsonwebtoken.workspace = true pbkdf2.workspace = true rand.workspace = true diff --git a/crates/common/src/config/utils.rs b/crates/common/src/config/utils.rs index 83e1a71a..db9adf5e 100644 --- a/crates/common/src/config/utils.rs +++ b/crates/common/src/config/utils.rs @@ -1,12 +1,15 @@ use std::{collections::HashMap, env, path::Path}; use alloy::rpc::types::beacon::BlsPublicKey; -use bytes::{BufMut, BytesMut}; use eyre::{bail, Context, Result}; use serde::de::DeserializeOwned; use super::JWTS_ENV; -use crate::{config::MUXER_HTTP_MAX_LENGTH, types::ModuleId}; +use crate::{ + config::MUXER_HTTP_MAX_LENGTH, + types::ModuleId, + utils::{read_chunked_body_with_max, ResponseReadError}, +}; pub const CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV: &str = "CB_TEST_HTTP_DISABLE_CONTENT_LENGTH"; @@ -36,7 +39,7 @@ pub fn load_jwt_secrets() -> Result> { /// Reads an HTTP response safely, erroring out if it failed or if the body is /// too large. -pub async fn safe_read_http_response(mut response: reqwest::Response) -> Result { +pub async fn safe_read_http_response(response: reqwest::Response) -> Result { // Get the content length from the response headers let mut content_length = response.content_length(); if env::var(CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV).is_ok() { @@ -56,18 +59,16 @@ pub async fn safe_read_http_response(mut response: reqwest::Response) -> Result< } // Read the response to a buffer in chunks - let mut buffer = BytesMut::with_capacity(1024); - while let Some(chunk) = response.chunk().await? 
{ - if buffer.len() > MUXER_HTTP_MAX_LENGTH as usize { - bail!( - "Response body exceeds the maximum allowed length ({MUXER_HTTP_MAX_LENGTH} bytes)" - ); - } - buffer.put(chunk); - } + let result = read_chunked_body_with_max(response, MUXER_HTTP_MAX_LENGTH as usize).await; + let bytes = match result { + Ok(bytes) => Ok(bytes), + Err(ResponseReadError::PayloadTooLarge { max: _, raw: _ }) => bail!( + "Response body exceeds the maximum allowed length ({MUXER_HTTP_MAX_LENGTH} bytes)" + ), + Err(ResponseReadError::ChunkError { inner }) => Err(inner), + }?; // Convert the buffer to a string - let bytes = buffer.freeze(); match std::str::from_utf8(&bytes) { Ok(s) => Ok(s.to_string()), Err(e) => bail!("Failed to decode response body as UTF-8: {e}"), diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index a1dcb7cb..9f72214c 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -9,11 +9,13 @@ use alloy::{ }; use axum::http::HeaderValue; use blst::min_pk::{PublicKey, Signature}; +use futures::StreamExt; use rand::{distr::Alphanumeric, Rng}; -use reqwest::header::HeaderMap; +use reqwest::{header::HeaderMap, Error as ReqwestError, Response}; use serde::{de::DeserializeOwned, Serialize}; use serde_json::Value; use ssz::{Decode, Encode}; +use thiserror::Error; use tracing::Level; use tracing_appender::{non_blocking::WorkerGuard, rolling::Rotation}; use tracing_subscriber::{ @@ -31,6 +33,46 @@ use crate::{ const MILLIS_PER_SECOND: u64 = 1_000; +#[derive(Debug, Error)] +pub enum ResponseReadError { + #[error("response size exceeds max size: max: {max} raw: {raw}")] + PayloadTooLarge { max: usize, raw: String }, + + #[error("error reading chunk from response: {inner:?}")] + ChunkError { inner: ReqwestError }, +} + +/// Reads the body of a response as a chunked stream, ensuring the size does not +/// exceed `max_size`. 
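///
/// [Editor's sketch, not part of this patch] Illustrative usage of the moved
/// helper; the URL and the 10 MiB cap are hypothetical:
/// ```no_run
/// # use cb_common::utils::read_chunked_body_with_max;
/// # async fn demo() -> eyre::Result<()> {
/// let res = reqwest::get("https://example.com/pubkeys").await?;
/// // An oversized body fails with ResponseReadError::PayloadTooLarge
/// let body: Vec<u8> = read_chunked_body_with_max(res, 10 * 1024 * 1024).await?;
/// # Ok(())
/// # }
/// ```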
+pub async fn read_chunked_body_with_max( + res: Response, + max_size: usize, +) -> Result, ResponseReadError> { + let mut stream = res.bytes_stream(); + let mut response_bytes = Vec::new(); + + while let Some(chunk) = stream.next().await { + let chunk = match chunk { + Ok(c) => c, + Err(e) => { + return Err(ResponseReadError::ChunkError { inner: e }); + } + }; + if response_bytes.len() + chunk.len() > max_size { + // avoid spamming logs if the message is too large + response_bytes.truncate(1024); + return Err(ResponseReadError::PayloadTooLarge { + max: max_size, + raw: String::from_utf8_lossy(&response_bytes).into_owned(), + }); + } + + response_bytes.extend_from_slice(&chunk); + } + + Ok(response_bytes) +} + pub fn timestamp_of_slot_start_sec(slot: u64, chain: Chain) -> u64 { chain.genesis_time_sec() + slot * chain.slot_time_sec() } diff --git a/crates/pbs/src/utils.rs b/crates/pbs/src/utils.rs index f1673431..98ba57b2 100644 --- a/crates/pbs/src/utils.rs +++ b/crates/pbs/src/utils.rs @@ -1,29 +1,21 @@ -use cb_common::pbs::error::PbsError; -use futures::StreamExt; +use cb_common::{ + pbs::error::PbsError, + utils::{read_chunked_body_with_max as read_chunked_body_with_max_impl, ResponseReadError}, +}; use reqwest::Response; pub async fn read_chunked_body_with_max( res: Response, max_size: usize, ) -> Result, PbsError> { - let mut stream = res.bytes_stream(); - let mut response_bytes = Vec::new(); - - while let Some(chunk) = stream.next().await { - let chunk = chunk?; - if response_bytes.len() + chunk.len() > max_size { - // avoid spamming logs if the message is too large - response_bytes.truncate(1024); - return Err(PbsError::PayloadTooLarge { - max: max_size, - raw: String::from_utf8_lossy(&response_bytes).into_owned(), - }); + let result = read_chunked_body_with_max_impl(res, max_size).await; + match result { + Ok(bytes) => Ok(bytes), + Err(ResponseReadError::PayloadTooLarge { max, raw }) => { + Err(PbsError::PayloadTooLarge { max, raw }) } - - response_bytes.extend_from_slice(&chunk); + Err(ResponseReadError::ChunkError { inner }) => Err(PbsError::Reqwest(inner)), } - - Ok(response_bytes) } const GAS_LIMIT_ADJUSTMENT_FACTOR: u64 = 1024; From 24eeb8c805a84f8ca5d433cdc3076bdc4419521b Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 9 Jul 2025 01:42:14 -0400 Subject: [PATCH 50/52] Reduced HTTP error to a warning in PBS too --- crates/common/src/pbs/event.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/common/src/pbs/event.rs b/crates/common/src/pbs/event.rs index 9bf61f01..590908d7 100644 --- a/crates/common/src/pbs/event.rs +++ b/crates/common/src/pbs/event.rs @@ -12,7 +12,7 @@ use eyre::{bail, Result}; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use tokio::net::TcpListener; -use tracing::{error, info, trace}; +use tracing::{error, info, trace, warn}; use url::Url; use super::{ @@ -51,7 +51,7 @@ impl BuilderEventPublisher { pub fn new(endpoints: Vec, http_timeout: Duration) -> Result { for endpoint in &endpoints { if endpoint.scheme() != "https" { - bail!("BuilderEventPublisher endpoints must use HTTPS (endpoint {endpoint} is invalid)"); + warn!("BuilderEventPublisher endpoint {endpoint} is insecure, consider using HTTPS if possible instead"); } } Ok(Self { From 72941ec7f63e2233b19b502e68e61803b60b2fe1 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 9 Jul 2025 22:23:40 -0400 Subject: [PATCH 51/52] Refactored read_chunked_body_with_max, added http_timeout to Lido key retrieval --- Cargo.lock | 42 --------- Cargo.toml | 2 - 
crates/common/Cargo.toml | 4 - crates/common/src/config/constants.rs | 2 +- crates/common/src/config/mux.rs | 93 ++++++++++++------- crates/common/src/config/utils.rs | 65 ++++++------- crates/common/src/pbs/error.rs | 8 +- crates/common/src/pbs/event.rs | 5 +- crates/common/src/utils.rs | 51 +++++++--- crates/pbs/src/mev_boost/get_header.rs | 7 +- .../pbs/src/mev_boost/register_validator.rs | 3 +- crates/pbs/src/mev_boost/status.rs | 3 +- crates/pbs/src/mev_boost/submit_block.rs | 3 +- crates/pbs/src/utils.rs | 20 ---- 14 files changed, 137 insertions(+), 171 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 05dbc3f5..63de92dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1501,11 +1501,9 @@ dependencies = [ "rand 0.9.0", "rayon", "reqwest", - "scopeguard", "serde", "serde_json", "serde_yaml", - "serial_test", "sha2 0.10.8", "ssz_types", "thiserror 2.0.12", @@ -4374,15 +4372,6 @@ dependencies = [ "cipher 0.3.0", ] -[[package]] -name = "scc" -version = "2.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22b2d775fb28f245817589471dd49c5edf64237f4a19d10ce9a92ff4651a27f4" -dependencies = [ - "sdd", -] - [[package]] name = "schannel" version = "0.1.27" @@ -4410,12 +4399,6 @@ dependencies = [ "sha2 0.9.9", ] -[[package]] -name = "sdd" -version = "3.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "584e070911c7017da6cb2eb0788d09f43d789029b5877d3e5ecc8acf86ceee21" - [[package]] name = "sec1" version = "0.7.3" @@ -4620,31 +4603,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serial_test" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" -dependencies = [ - "futures", - "log", - "once_cell", - "parking_lot", - "scc", - "serial_test_derive", -] - -[[package]] -name = "serial_test_derive" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", -] - [[package]] name = "sha1" version = "0.10.6" diff --git a/Cargo.toml b/Cargo.toml index 6fd0c23b..5102238b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,11 +55,9 @@ prost = "0.13.4" rand = { version = "0.9", features = ["os_rng"] } rayon = "1.10.0" reqwest = { version = "0.12.4", features = ["json", "stream"] } -scopeguard = "1.2.0" serde = { version = "1.0.202", features = ["derive"] } serde_json = "1.0.117" serde_yaml = "0.9.33" -serial_test = "3.2.0" sha2 = "0.10.8" ssz_types = "0.10" tempfile = "3.20.0" diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 95f6bbc1..fe2f9aec 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -32,7 +32,6 @@ reqwest.workspace = true serde.workspace = true serde_json.workspace = true serde_yaml.workspace = true -serial_test.workspace = true sha2.workspace = true ssz_types.workspace = true thiserror.workspace = true @@ -46,6 +45,3 @@ tree_hash.workspace = true tree_hash_derive.workspace = true unicode-normalization.workspace = true url.workspace = true - -[dev-dependencies] -scopeguard.workspace = true diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index 4ea28223..8b07f732 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -78,7 +78,7 @@ pub const PROXY_DIR_SECRETS_DEFAULT: &str = "/proxy_secrets"; pub const HTTP_TIMEOUT_SECONDS_DEFAULT: 
u64 = 10; /// Max content length for Muxer HTTP responses, in bytes -pub const MUXER_HTTP_MAX_LENGTH: u64 = 1024 * 1024 * 1024 * 10; // 10 MiB +pub const MUXER_HTTP_MAX_LENGTH: usize = 1024 * 1024 * 10; // 10 MiB ///////////////////////// MODULES ///////////////////////// diff --git a/crates/common/src/config/mux.rs b/crates/common/src/config/mux.rs index 79bb8db0..487a5909 100644 --- a/crates/common/src/config/mux.rs +++ b/crates/common/src/config/mux.rs @@ -8,10 +8,12 @@ use std::{ use alloy::{ primitives::{address, Address, U256}, providers::ProviderBuilder, - rpc::types::beacon::BlsPublicKey, + rpc::{client::RpcClient, types::beacon::BlsPublicKey}, sol, + transports::http::Http, }; use eyre::{bail, ensure, Context}; +use reqwest::Client; use serde::{Deserialize, Serialize}; use tracing::{debug, info, warn}; use url::Url; @@ -193,8 +195,8 @@ impl MuxKeysLoader { } let client = reqwest::ClientBuilder::new().timeout(http_timeout).build()?; let response = client.get(url).send().await?; - let pubkeys = safe_read_http_response(response).await?; - serde_json::from_str(&pubkeys) + let pubkey_bytes = safe_read_http_response(response).await?; + serde_json::from_slice(&pubkey_bytes) .wrap_err("failed to fetch mux keys from HTTP endpoint") } @@ -204,7 +206,13 @@ impl MuxKeysLoader { bail!("Lido registry requires RPC URL to be set in the PBS config"); }; - fetch_lido_registry_keys(rpc_url, chain, U256::from(*node_operator_id)).await + fetch_lido_registry_keys( + rpc_url, + chain, + U256::from(*node_operator_id), + http_timeout, + ) + .await } NORegistry::SSV => { fetch_ssv_pubkeys(chain, U256::from(*node_operator_id), http_timeout).await @@ -254,10 +262,17 @@ async fn fetch_lido_registry_keys( rpc_url: Url, chain: Chain, node_operator_id: U256, + http_timeout: Duration, ) -> eyre::Result> { debug!(?chain, %node_operator_id, "loading operator keys from Lido registry"); - let provider = ProviderBuilder::new().on_http(rpc_url); + // Create an RPC provider with HTTP timeout support + let client = Client::builder().timeout(http_timeout).build()?; + let http = Http::with_client(client, rpc_url); + let is_local = http.guess_local(); + let rpc_client = RpcClient::new(http, is_local); + let provider = ProviderBuilder::new().on_client(rpc_client); + let registry_address = lido_registry_address(chain)?; let registry = LidoRegistry::new(registry_address, provider); @@ -362,9 +377,8 @@ async fn fetch_ssv_pubkeys_from_url( })?; // Parse the response as JSON - let body_string = safe_read_http_response(response).await?; - serde_json::from_slice::(body_string.as_bytes()) - .wrap_err("failed to parse SSV response") + let body_bytes = safe_read_http_response(response).await?; + serde_json::from_slice::(&body_bytes).wrap_err("failed to parse SSV response") } #[derive(Deserialize)] @@ -386,19 +400,17 @@ struct SSVPagination { #[cfg(test)] mod tests { - use std::{env, net::SocketAddr}; + use std::net::SocketAddr; use alloy::{hex::FromHex, primitives::U256, providers::ProviderBuilder}; use axum::{response::Response, routing::get}; - use scopeguard::defer; - use serial_test::serial; use tokio::{net::TcpListener, task::JoinHandle}; use url::Url; use super::*; - use crate::config::{ - CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV, HTTP_TIMEOUT_SECONDS_DEFAULT, - MUXER_HTTP_MAX_LENGTH, + use crate::{ + config::{HTTP_TIMEOUT_SECONDS_DEFAULT, MUXER_HTTP_MAX_LENGTH}, + utils::{set_ignore_content_length, ResponseReadError}, }; const TEST_HTTP_TIMEOUT: u64 = 2; @@ -471,25 +483,28 @@ mod tests { } #[tokio::test] - #[serial] /// Tests 
that the SSV network fetch is handled properly when the response's /// body is too large async fn test_ssv_network_fetch_big_data() -> eyre::Result<()> { // Start the mock server let port = 30101; - env::remove_var(CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV); let _server_handle = create_mock_server(port).await?; let url = format!("http://localhost:{port}/big_data"); let response = fetch_ssv_pubkeys_from_url(&url, Duration::from_secs(120)).await; // The response should fail due to content length being too big - assert!(response.is_err(), "Expected error due to big content length, but got success"); - if let Err(e) = response { - assert!( - e.to_string().contains("content length") && - e.to_string().contains("exceeds the maximum allowed length"), - "Expected content length error, got: {e}", - ); + match response { + Ok(_) => { + panic!("Expected an error due to big content length, but got a successful response") + } + Err(e) => match e.downcast_ref::() { + Some(ResponseReadError::PayloadTooLarge { max, content_length, raw }) => { + assert_eq!(*max, MUXER_HTTP_MAX_LENGTH); + assert!(*content_length > MUXER_HTTP_MAX_LENGTH); + assert!(raw.is_empty()); + } + _ => panic!("Expected PayloadTooLarge error, got: {}", e), + }, } // Clean up the server handle @@ -522,25 +537,29 @@ mod tests { } #[tokio::test] - #[serial] /// Tests that the SSV network fetch is handled properly when the response's /// content-length header is missing async fn test_ssv_network_fetch_big_data_without_content_length() -> eyre::Result<()> { // Start the mock server let port = 30103; - env::set_var(CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV, "1"); - defer! { env::remove_var(CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV); } + set_ignore_content_length(true); let _server_handle = create_mock_server(port).await?; let url = format!("http://localhost:{port}/big_data"); let response = fetch_ssv_pubkeys_from_url(&url, Duration::from_secs(120)).await; - // The response should fail due to timeout - assert!(response.is_err(), "Expected error due to body size, but got success"); - if let Err(e) = response { - assert!( - e.to_string().contains("Response body exceeds the maximum allowed length "), - "Expected content length error, got: {e}", - ); + // The response should fail due to the body being too big + match response { + Ok(_) => { + panic!("Expected an error due to excessive data, but got a successful response") + } + Err(e) => match e.downcast_ref::() { + Some(ResponseReadError::PayloadTooLarge { max, content_length, raw }) => { + assert_eq!(*max, MUXER_HTTP_MAX_LENGTH); + assert_eq!(*content_length, 0); + assert!(!raw.is_empty()); + } + _ => panic!("Expected PayloadTooLarge error, got: {}", e), + }, } // Clean up the server handle @@ -585,10 +604,12 @@ mod tests { .unwrap() } - /// Sends a response with a large body but no content length + /// Sends a response with a large body - larger than the maximum allowed. + /// Note that hyper overwrites the content-length header automatically, so + /// setting it here wouldn't actually change the value that ultimately + /// gets sent to the server. 
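// [Editor's sketch, not part of this patch] How the size guard distinguishes
// the two tests above, assuming the error shape introduced in this patch:
// with a trusted Content-Length the request is rejected before any body is
// read (raw is empty), while hiding the header via
// set_ignore_content_length(true) means the guard only trips mid-stream, so
// raw carries a truncated prefix. The helper name `classify` is hypothetical.
use cb_common::{
    config::MUXER_HTTP_MAX_LENGTH,
    utils::{read_chunked_body_with_max, ResponseReadError},
};

async fn classify(res: reqwest::Response) -> &'static str {
    match read_chunked_body_with_max(res, MUXER_HTTP_MAX_LENGTH).await {
        // Rejected up front from the advertised Content-Length
        Err(ResponseReadError::PayloadTooLarge { raw, .. }) if raw.is_empty() => {
            "rejected by header"
        }
        // Rejected while streaming; raw is capped at 1024 bytes for logging
        Err(ResponseReadError::PayloadTooLarge { .. }) => "rejected mid-stream",
        Err(ResponseReadError::ReqwestError(_)) => "transport error",
        Ok(_) => "ok",
    }
}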
async fn handle_big_data() -> Response { - // Create a response with a large body but no content length - let body = "f".repeat(2 * MUXER_HTTP_MAX_LENGTH as usize); + let body = "f".repeat(2 * MUXER_HTTP_MAX_LENGTH); Response::builder() .status(200) .header("Content-Type", "application/text") diff --git a/crates/common/src/config/utils.rs b/crates/common/src/config/utils.rs index db9adf5e..f914e24c 100644 --- a/crates/common/src/config/utils.rs +++ b/crates/common/src/config/utils.rs @@ -1,17 +1,11 @@ -use std::{collections::HashMap, env, path::Path}; +use std::{collections::HashMap, path::Path}; use alloy::rpc::types::beacon::BlsPublicKey; use eyre::{bail, Context, Result}; use serde::de::DeserializeOwned; use super::JWTS_ENV; -use crate::{ - config::MUXER_HTTP_MAX_LENGTH, - types::ModuleId, - utils::{read_chunked_body_with_max, ResponseReadError}, -}; - -pub const CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV: &str = "CB_TEST_HTTP_DISABLE_CONTENT_LENGTH"; +use crate::{config::MUXER_HTTP_MAX_LENGTH, types::ModuleId, utils::read_chunked_body_with_max}; pub fn load_env_var(env: &str) -> Result { std::env::var(env).wrap_err(format!("{env} is not set")) @@ -39,40 +33,33 @@ pub fn load_jwt_secrets() -> Result> { /// Reads an HTTP response safely, erroring out if it failed or if the body is /// too large. -pub async fn safe_read_http_response(response: reqwest::Response) -> Result { - // Get the content length from the response headers - let mut content_length = response.content_length(); - if env::var(CB_TEST_HTTP_DISABLE_CONTENT_LENGTH_ENV).is_ok() { - content_length = None; - } - - // Break if content length is provided but it's too big - if let Some(length) = content_length { - if length > MUXER_HTTP_MAX_LENGTH { - bail!("Response content length ({length}) exceeds the maximum allowed length ({MUXER_HTTP_MAX_LENGTH} bytes)"); - } - } +pub async fn safe_read_http_response(response: reqwest::Response) -> Result> { + // Read the response to a buffer in chunks + let status_code = response.status(); + let response_bytes = read_chunked_body_with_max(response, MUXER_HTTP_MAX_LENGTH) + .await + .map_err(|e| eyre::Report::new(e)); // Make sure the response is a 200 - if response.status() != reqwest::StatusCode::OK { - bail!("Request failed with status: {}", response.status()); - } - - // Read the response to a buffer in chunks - let result = read_chunked_body_with_max(response, MUXER_HTTP_MAX_LENGTH as usize).await; - let bytes = match result { - Ok(bytes) => Ok(bytes), - Err(ResponseReadError::PayloadTooLarge { max: _, raw: _ }) => bail!( - "Response body exceeds the maximum allowed length ({MUXER_HTTP_MAX_LENGTH} bytes)" - ), - Err(ResponseReadError::ChunkError { inner }) => Err(inner), - }?; - - // Convert the buffer to a string - match std::str::from_utf8(&bytes) { - Ok(s) => Ok(s.to_string()), - Err(e) => bail!("Failed to decode response body as UTF-8: {e}"), + if status_code != reqwest::StatusCode::OK { + match response_bytes { + Ok(bytes) => { + bail!( + "Request failed with status: {}, body: {}", + status_code, + String::from_utf8_lossy(&bytes) + ); + } + Err(e) => { + bail!( + "Request failed with status: {} but decoding the response body failed: {}", + status_code, + e + ); + } + } } + response_bytes } /// Removes duplicate entries from a vector of BlsPublicKey diff --git a/crates/common/src/pbs/error.rs b/crates/common/src/pbs/error.rs index 242cb90e..9b42a626 100644 --- a/crates/common/src/pbs/error.rs +++ b/crates/common/src/pbs/error.rs @@ -4,7 +4,7 @@ use alloy::{ }; use thiserror::Error; 
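// [Editor's sketch, not part of this patch] The `#[from]` on the new
// ReadResponse variant below is what lets PBS callers use `?` directly on
// read_chunked_body_with_max and drop the hand-written wrapper removed later
// in this patch. Hypothetical caller; the 1 MiB cap is illustrative only.
use cb_common::{pbs::error::PbsError, utils::read_chunked_body_with_max};

async fn read_relay_body(res: reqwest::Response) -> Result<Vec<u8>, PbsError> {
    // ResponseReadError converts into PbsError::ReadResponse via #[from]
    let body = read_chunked_body_with_max(res, 1024 * 1024).await?;
    Ok(body)
}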
-use crate::error::BlstErrorWrapper; +use crate::{error::BlstErrorWrapper, utils::ResponseReadError}; #[derive(Debug, Error)] pub enum PbsError { @@ -17,12 +17,12 @@ pub enum PbsError { #[error("json decode error: {err:?}, raw: {raw}")] JsonDecode { err: serde_json::Error, raw: String }, + #[error("{0}")] + ReadResponse(#[from] ResponseReadError), + #[error("relay response error. Code: {code}, err: {error_msg:?}")] RelayResponse { error_msg: String, code: u16 }, - #[error("response size exceeds max size: max: {max} raw: {raw}")] - PayloadTooLarge { max: usize, raw: String }, - #[error("failed validating relay response: {0}")] Validation(#[from] ValidationError), diff --git a/crates/common/src/pbs/event.rs b/crates/common/src/pbs/event.rs index 590908d7..98276d14 100644 --- a/crates/common/src/pbs/event.rs +++ b/crates/common/src/pbs/event.rs @@ -54,10 +54,7 @@ impl BuilderEventPublisher { warn!("BuilderEventPublisher endpoint {endpoint} is insecure, consider using HTTPS if possible instead"); } } - Ok(Self { - client: reqwest::ClientBuilder::new().timeout(http_timeout).build().unwrap(), - endpoints, - }) + Ok(Self { client: reqwest::ClientBuilder::new().timeout(http_timeout).build()?, endpoints }) } pub fn new_from_env() -> Result> { diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index 9f72214c..6d7d042d 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -1,4 +1,5 @@ use std::{ + cell::Cell, net::Ipv4Addr, time::{SystemTime, UNIX_EPOCH}, }; @@ -11,7 +12,7 @@ use axum::http::HeaderValue; use blst::min_pk::{PublicKey, Signature}; use futures::StreamExt; use rand::{distr::Alphanumeric, Rng}; -use reqwest::{header::HeaderMap, Error as ReqwestError, Response}; +use reqwest::{header::HeaderMap, Response}; use serde::{de::DeserializeOwned, Serialize}; use serde_json::Value; use ssz::{Decode, Encode}; @@ -35,11 +36,25 @@ const MILLIS_PER_SECOND: u64 = 1_000; #[derive(Debug, Error)] pub enum ResponseReadError { - #[error("response size exceeds max size: max: {max} raw: {raw}")] - PayloadTooLarge { max: usize, raw: String }, + #[error( + "response size exceeds max size; max: {max}, content_length: {content_length}, raw: {raw}" + )] + PayloadTooLarge { max: usize, content_length: usize, raw: String }, - #[error("error reading chunk from response: {inner:?}")] - ChunkError { inner: ReqwestError }, + #[error("error reading response stream: {0}")] + ReqwestError(#[from] reqwest::Error), +} + +thread_local! 
{ + static IGNORE_CONTENT_LENGTH: Cell = const { Cell::new(false) }; +} + +pub fn set_ignore_content_length(val: bool) { + IGNORE_CONTENT_LENGTH.with(|f| f.set(val)); +} + +fn should_ignore_content_length() -> bool { + IGNORE_CONTENT_LENGTH.with(|f| f.get()) } /// Reads the body of a response as a chunked stream, ensuring the size does not @@ -48,21 +63,35 @@ pub async fn read_chunked_body_with_max( res: Response, max_size: usize, ) -> Result, ResponseReadError> { + // Get the content length from the response headers + let mut content_length = res.content_length(); + if should_ignore_content_length() { + // Used for testing purposes to ignore content length + content_length = None; + } + + // Break if content length is provided but it's too big + if let Some(length) = content_length { + if length as usize > max_size { + return Err(ResponseReadError::PayloadTooLarge { + max: max_size, + content_length: length as usize, + raw: String::new(), // raw content is not available here + }); + } + } + let mut stream = res.bytes_stream(); let mut response_bytes = Vec::new(); while let Some(chunk) = stream.next().await { - let chunk = match chunk { - Ok(c) => c, - Err(e) => { - return Err(ResponseReadError::ChunkError { inner: e }); - } - }; + let chunk = chunk?; if response_bytes.len() + chunk.len() > max_size { // avoid spamming logs if the message is too large response_bytes.truncate(1024); return Err(ResponseReadError::PayloadTooLarge { max: max_size, + content_length: content_length.unwrap_or(0) as usize, raw: String::from_utf8_lossy(&response_bytes).into_owned(), }); } diff --git a/crates/pbs/src/mev_boost/get_header.rs b/crates/pbs/src/mev_boost/get_header.rs index e4922245..85e3cf2c 100644 --- a/crates/pbs/src/mev_boost/get_header.rs +++ b/crates/pbs/src/mev_boost/get_header.rs @@ -19,7 +19,10 @@ use cb_common::{ signature::verify_signed_message, signer::BlsSignature, types::Chain, - utils::{get_user_agent_with_version, ms_into_slot, timestamp_of_slot_start_sec, utcnow_ms}, + utils::{ + get_user_agent_with_version, ms_into_slot, read_chunked_body_with_max, + timestamp_of_slot_start_sec, utcnow_ms, + }, }; use futures::future::join_all; use parking_lot::RwLock; @@ -36,7 +39,7 @@ use crate::{ }, metrics::{RELAY_HEADER_VALUE, RELAY_LAST_SLOT, RELAY_LATENCY, RELAY_STATUS_CODE}, state::{BuilderApiState, PbsState}, - utils::{check_gas_limit, read_chunked_body_with_max}, + utils::check_gas_limit, }; /// Implements https://ethereum.github.io/builder-specs/#/Builder/getHeader diff --git a/crates/pbs/src/mev_boost/register_validator.rs b/crates/pbs/src/mev_boost/register_validator.rs index c99f5d5f..5d2b5f1e 100644 --- a/crates/pbs/src/mev_boost/register_validator.rs +++ b/crates/pbs/src/mev_boost/register_validator.rs @@ -4,7 +4,7 @@ use alloy::rpc::types::beacon::relay::ValidatorRegistration; use axum::http::{HeaderMap, HeaderValue}; use cb_common::{ pbs::{error::PbsError, RelayClient, HEADER_START_TIME_UNIX_MS}, - utils::{get_user_agent_with_version, utcnow_ms}, + utils::{get_user_agent_with_version, read_chunked_body_with_max, utcnow_ms}, }; use eyre::bail; use futures::future::{join_all, select_ok}; @@ -16,7 +16,6 @@ use crate::{ constants::{MAX_SIZE_DEFAULT, REGISTER_VALIDATOR_ENDPOINT_TAG, TIMEOUT_ERROR_CODE_STR}, metrics::{RELAY_LATENCY, RELAY_STATUS_CODE}, state::{BuilderApiState, PbsState}, - utils::read_chunked_body_with_max, }; /// Implements https://ethereum.github.io/builder-specs/#/Builder/registerValidator diff --git a/crates/pbs/src/mev_boost/status.rs 
b/crates/pbs/src/mev_boost/status.rs index 591e7cd1..b1a82e57 100644 --- a/crates/pbs/src/mev_boost/status.rs +++ b/crates/pbs/src/mev_boost/status.rs @@ -3,7 +3,7 @@ use std::time::{Duration, Instant}; use axum::http::HeaderMap; use cb_common::{ pbs::{error::PbsError, RelayClient}, - utils::get_user_agent_with_version, + utils::{get_user_agent_with_version, read_chunked_body_with_max}, }; use futures::future::select_ok; use reqwest::header::USER_AGENT; @@ -13,7 +13,6 @@ use crate::{ constants::{MAX_SIZE_DEFAULT, STATUS_ENDPOINT_TAG, TIMEOUT_ERROR_CODE_STR}, metrics::{RELAY_LATENCY, RELAY_STATUS_CODE}, state::{BuilderApiState, PbsState}, - utils::read_chunked_body_with_max, }; /// Implements https://ethereum.github.io/builder-specs/#/Builder/status diff --git a/crates/pbs/src/mev_boost/submit_block.rs b/crates/pbs/src/mev_boost/submit_block.rs index abb9554f..5b781e01 100644 --- a/crates/pbs/src/mev_boost/submit_block.rs +++ b/crates/pbs/src/mev_boost/submit_block.rs @@ -8,7 +8,7 @@ use cb_common::{ PayloadAndBlobsDeneb, PayloadAndBlobsElectra, RelayClient, SignedBlindedBeaconBlock, SubmitBlindedBlockResponse, VersionedResponse, HEADER_START_TIME_UNIX_MS, }, - utils::{get_user_agent_with_version, utcnow_ms}, + utils::{get_user_agent_with_version, read_chunked_body_with_max, utcnow_ms}, }; use futures::future::select_ok; use reqwest::header::USER_AGENT; @@ -21,7 +21,6 @@ use crate::{ }, metrics::{RELAY_LATENCY, RELAY_STATUS_CODE}, state::{BuilderApiState, PbsState}, - utils::read_chunked_body_with_max, }; /// Implements https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlock diff --git a/crates/pbs/src/utils.rs b/crates/pbs/src/utils.rs index 98ba57b2..782ae79b 100644 --- a/crates/pbs/src/utils.rs +++ b/crates/pbs/src/utils.rs @@ -1,23 +1,3 @@ -use cb_common::{ - pbs::error::PbsError, - utils::{read_chunked_body_with_max as read_chunked_body_with_max_impl, ResponseReadError}, -}; -use reqwest::Response; - -pub async fn read_chunked_body_with_max( - res: Response, - max_size: usize, -) -> Result, PbsError> { - let result = read_chunked_body_with_max_impl(res, max_size).await; - match result { - Ok(bytes) => Ok(bytes), - Err(ResponseReadError::PayloadTooLarge { max, raw }) => { - Err(PbsError::PayloadTooLarge { max, raw }) - } - Err(ResponseReadError::ChunkError { inner }) => Err(PbsError::Reqwest(inner)), - } -} - const GAS_LIMIT_ADJUSTMENT_FACTOR: u64 = 1024; const GAS_LIMIT_MINIMUM: u64 = 5_000; From 0c1a35f4aa0136ecaec39bb157a903445ea26e01 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 10 Jul 2025 13:29:14 -0400 Subject: [PATCH 52/52] Update crates/common/src/config/utils.rs Co-authored-by: ltitanb <163874448+ltitanb@users.noreply.github.com> --- crates/common/src/config/utils.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/common/src/config/utils.rs b/crates/common/src/config/utils.rs index f914e24c..1bd4b92b 100644 --- a/crates/common/src/config/utils.rs +++ b/crates/common/src/config/utils.rs @@ -37,8 +37,7 @@ pub async fn safe_read_http_response(response: reqwest::Response) -> Result