From c68125d48d8e546bf2dce27ca311da6896b47b13 Mon Sep 17 00:00:00 2001 From: eltitanb Date: Wed, 2 Apr 2025 15:03:57 +0100 Subject: [PATCH 01/35] bump version --- crates/signer/src/proto/v1.rs | 205 +++++++++++++++++++++++----------- 1 file changed, 141 insertions(+), 64 deletions(-) diff --git a/crates/signer/src/proto/v1.rs b/crates/signer/src/proto/v1.rs index ba8012c3..36984aa0 100644 --- a/crates/signer/src/proto/v1.rs +++ b/crates/signer/src/proto/v1.rs @@ -24,8 +24,7 @@ impl ResponseState { /// String value of the enum field names used in the ProtoBuf definition. /// /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic - /// use. + /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { Self::Unknown => "UNKNOWN", @@ -90,9 +89,10 @@ pub mod lister_client { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value + clippy::let_unit_value, )] - use tonic::codegen::{http::Uri, *}; + use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct ListerClient { inner: tonic::client::Grpc, @@ -136,15 +136,16 @@ pub mod lister_client { >::ResponseBody, >, >, - >>::Error: - Into + std::marker::Send + std::marker::Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { ListerClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// - /// This requires the server to support it otherwise it might respond - /// with an error. + /// This requires the server to support it otherwise it might respond with an + /// error. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); @@ -175,11 +176,18 @@ pub mod lister_client { pub async fn list_accounts( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.Lister/ListAccounts"); let mut req = request.into_request(); @@ -239,9 +247,10 @@ pub mod account_manager_client { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value + clippy::let_unit_value, )] - use tonic::codegen::{http::Uri, *}; + use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct AccountManagerClient { inner: tonic::client::Grpc, @@ -285,15 +294,16 @@ pub mod account_manager_client { >::ResponseBody, >, >, - >>::Error: - Into + std::marker::Send + std::marker::Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { AccountManagerClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// - /// This requires the server to support it otherwise it might respond - /// with an error. + /// This requires the server to support it otherwise it might respond with an + /// error. 
#[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); @@ -324,11 +334,18 @@ pub mod account_manager_client { pub async fn unlock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.AccountManager/Unlock"); let mut req = request.into_request(); @@ -338,11 +355,18 @@ pub mod account_manager_client { pub async fn lock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.AccountManager/Lock"); let mut req = request.into_request(); @@ -352,14 +376,25 @@ pub mod account_manager_client { pub async fn generate( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/v1.AccountManager/Generate"); + let path = http::uri::PathAndQuery::from_static( + "/v1.AccountManager/Generate", + ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("v1.AccountManager", "Generate")); + req.extensions_mut() + .insert(GrpcMethod::new("v1.AccountManager", "Generate")); self.inner.unary(req, path, codec).await } } @@ -486,9 +521,10 @@ pub mod signer_client { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value + clippy::let_unit_value, )] - use tonic::codegen::{http::Uri, *}; + use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct SignerClient { inner: tonic::client::Grpc, @@ -532,15 +568,16 @@ pub mod signer_client { >::ResponseBody, >, >, - >>::Error: - Into + std::marker::Send + std::marker::Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { SignerClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// - /// This requires the server to support it otherwise it might respond - /// with an error. + /// This requires the server to support it otherwise it might respond with an + /// error. 
#[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); @@ -572,9 +609,14 @@ pub mod signer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.Signer/Sign"); let mut req = request.into_request(); @@ -584,10 +626,18 @@ pub mod signer_client { pub async fn multisign( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.Signer/Multisign"); let mut req = request.into_request(); @@ -598,39 +648,66 @@ pub mod signer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/v1.Signer/SignBeaconAttestation"); + let path = http::uri::PathAndQuery::from_static( + "/v1.Signer/SignBeaconAttestation", + ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("v1.Signer", "SignBeaconAttestation")); + req.extensions_mut() + .insert(GrpcMethod::new("v1.Signer", "SignBeaconAttestation")); self.inner.unary(req, path, codec).await } pub async fn sign_beacon_attestations( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/v1.Signer/SignBeaconAttestations"); + let path = http::uri::PathAndQuery::from_static( + "/v1.Signer/SignBeaconAttestations", + ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("v1.Signer", "SignBeaconAttestations")); + req.extensions_mut() + .insert(GrpcMethod::new("v1.Signer", "SignBeaconAttestations")); self.inner.unary(req, path, codec).await } pub async fn sign_beacon_proposal( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = 
tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/v1.Signer/SignBeaconProposal"); + let path = http::uri::PathAndQuery::from_static( + "/v1.Signer/SignBeaconProposal", + ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("v1.Signer", "SignBeaconProposal")); + req.extensions_mut() + .insert(GrpcMethod::new("v1.Signer", "SignBeaconProposal")); self.inner.unary(req, path, codec).await } } From d9979a239eaf6fc0365a30c39cf81264d34d9edd Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 5 May 2025 17:13:43 -0400 Subject: [PATCH 02/35] Successful cross-compilation, but runtime has memory allocation issues --- provisioning/signer.Dockerfile | 70 +++++++++++++++++++++++++++++----- 1 file changed, 60 insertions(+), 10 deletions(-) diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 85c2be43..bc258b47 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,22 +1,72 @@ -FROM lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +# This will be the main build image +FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +ARG TARGETOS TARGETARCH BUILDPLATFORM WORKDIR /app -FROM chef AS planner +# Planner stage +FROM --platform=${BUILDPLATFORM} chef AS planner +ARG TARGETOS TARGETARCH BUILDPLATFORM COPY . . RUN cargo chef prepare --recipe-path recipe.json -FROM chef AS builder +# Builder stage +FROM --platform=${BUILDPLATFORM} chef AS builder +ARG TARGETOS TARGETARCH BUILDPLATFORM COPY --from=planner /app/recipe.json recipe.json - -RUN cargo chef cook --release --recipe-path recipe.json - -RUN apt-get update && apt-get install -y protobuf-compiler - COPY . . -RUN cargo build --release --bin commit-boost-signer +# Get the latest Protoc since the one in the Debian repo is incredibly old +RUN apt update && apt install -y unzip curl ca-certificates && \ + PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ + if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ + PROTOC_ARCH=x86_64; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ + PROTOC_ARCH=aarch_64; \ + else \ + echo "${BUILDPLATFORM} is not supported."; \ + exit 1; \ + fi && \ + curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ + unzip -q protoc.zip bin/protoc -d /usr && \ + unzip -q protoc.zip "include/google/*" -d /usr && \ + chmod a+x /usr/bin/protoc && \ + rm -rf protoc.zip + +# Build the application +RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ + # We're on x64, cross-compiling for arm64 - get OpenSSL and zlib for arm64, and set up the GCC vars + dpkg --add-architecture arm64 && \ + apt update && \ + apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ + rustup target add aarch64-unknown-linux-gnu && \ + TARGET="aarch64-unknown-linux-gnu" && \ + TARGET_FLAG="--target=${TARGET}" && \ + export PKG_CONFIG_ALLOW_CROSS="true" && \ + export PKG_CONFIG_PATH="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ + export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-ld" && \ + export RUSTFLAGS="-L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))"; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ + # We're on arm64, cross-compiling for x64 - get OpenSSL and zlib for x64, and set up the GCC vars + dpkg 
--add-architecture amd64 && \ + apt update && \ + apt install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 && \ + rustup target add x86_64-unknown-linux-gnu && \ + TARGET="x86_64-unknown-linux-gnu" && \ + TARGET_FLAG="--target=${TARGET}" && \ + export PKG_CONFIG_ALLOW_CROSS="true" && \ + export PKG_CONFIG_PATH="/usr/lib/x86_64-linux-gnu/pkgconfig"; \ + export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-ld"; \ + export RUSTFLAGS="-L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ + fi && \ + # Build the signer - general setup that works with or without cross-compilation + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ + cargo build ${TARGET_FLAG} --release --bin commit-boost-signer && \ + if [ ! -z "$TARGET" ]; then \ + # If we're cross-compiling, we need to move the binary out of the target dir + mv target/${TARGET}/release/commit-boost-signer target/release/commit-boost-signer; \ + fi -FROM debian:bookworm-20240904-slim AS runtime +FROM debian:bookworm-slim AS runtime WORKDIR /app RUN apt-get update && apt-get install -y \ From 97ef653d602dbf9397de54abdc48ba21f063eb9e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 04:09:49 -0400 Subject: [PATCH 03/35] Working with OpenSSL static-linked --- Cargo.lock | 12 ++++++++++++ Cargo.toml | 3 +++ crates/common/Cargo.toml | 4 ++++ crates/common/build.rs | 8 ++++++++ provisioning/signer.Dockerfile | 20 +++++++++++--------- 5 files changed, 38 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ebc811a..436d3b65 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1494,6 +1494,8 @@ dependencies = [ "ethereum_ssz_derive", "eyre", "jsonwebtoken", + "k256", + "openssl", "pbkdf2 0.12.2", "rand 0.9.0", "reqwest", @@ -3550,6 +3552,15 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-src" +version = "300.5.0+3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8ce546f549326b0e6052b649198487d91320875da901e7bd11a06d1ee3f9c2f" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" version = "0.9.106" @@ -3558,6 +3569,7 @@ checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd" dependencies = [ "cc", "libc", + "openssl-src", "pkg-config", "vcpkg", ] diff --git a/Cargo.toml b/Cargo.toml index aef26a94..14cddf82 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,9 @@ edition = "2021" rust-version = "1.83" version = "0.7.0-rc.2" +[workspace.features] +openssl-vendored = ["crates/common/openssl-vendored"] + [workspace.dependencies] aes = "0.8" alloy = { version = "0.12", features = [ diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index df78b046..15c0b8d1 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -41,3 +41,7 @@ tree_hash_derive.workspace = true unicode-normalization.workspace = true url.workspace = true jsonwebtoken.workspace = true +openssl = { version = "0.10", optional = true, features = ["vendored"] } + +[features] +openssl-vendored = ["openssl/vendored"] diff --git a/crates/common/build.rs b/crates/common/build.rs index 9bd10ecb..c24a54cb 100644 --- a/crates/common/build.rs +++ b/crates/common/build.rs @@ -1,6 +1,14 @@ use std::process::Command; fn main() { + let target = std::env::var("TARGET").unwrap(); + let host = std::env::var("HOST").unwrap(); + + if target != host { + 
println!("cargo:warning=Skipping build script because TARGET != HOST"); + return; + } + let output = Command::new("git").args(["rev-parse", "HEAD"]).output().unwrap(); let git_hash = String::from_utf8(output.stdout).unwrap(); println!("cargo:rustc-env=GIT_HASH={git_hash}"); diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index bc258b47..523a2ff4 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -35,16 +35,17 @@ RUN apt update && apt install -y unzip curl ca-certificates && \ # Build the application RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # We're on x64, cross-compiling for arm64 - get OpenSSL and zlib for arm64, and set up the GCC vars - dpkg --add-architecture arm64 && \ + rustup target add aarch64-unknown-linux-gnu && \ + #dpkg --add-architecture arm64 && \ apt update && \ apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ - rustup target add aarch64-unknown-linux-gnu && \ TARGET="aarch64-unknown-linux-gnu" && \ TARGET_FLAG="--target=${TARGET}" && \ - export PKG_CONFIG_ALLOW_CROSS="true" && \ - export PKG_CONFIG_PATH="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ - export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-ld" && \ - export RUSTFLAGS="-L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))"; \ + # export PKG_CONFIG_ALLOW_CROSS="true" && \ + # export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ + export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-gcc" && \ + export RUSTFLAGS="-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))" && \ + FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ # We're on arm64, cross-compiling for x64 - get OpenSSL and zlib for x64, and set up the GCC vars dpkg --add-architecture amd64 && \ @@ -55,12 +56,13 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ TARGET_FLAG="--target=${TARGET}" && \ export PKG_CONFIG_ALLOW_CROSS="true" && \ export PKG_CONFIG_PATH="/usr/lib/x86_64-linux-gnu/pkgconfig"; \ - export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-ld"; \ + export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-gcc"; \ export RUSTFLAGS="-L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ fi && \ # Build the signer - general setup that works with or without cross-compilation - cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ - cargo build ${TARGET_FLAG} --release --bin commit-boost-signer && \ + # cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo build ${TARGET_FLAG} --release --bin commit-boost-signer ${FEATURE_OPENSSL_VENDORED} && \ if [ ! -z "$TARGET" ]; then \ # If we're cross-compiling, we need to move the binary out of the target dir mv target/${TARGET}/release/commit-boost-signer target/release/commit-boost-signer; \ From 91eefe2de57a28c6ddbda38666046cbc711f93d6 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 04:43:02 -0400 Subject: [PATCH 04/35] Got dynamic linking working, added a feature flag to toggle dynamic vs. 
static --- provisioning/signer.Dockerfile | 49 ++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 523a2ff4..3c29075d 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,17 +1,17 @@ # This will be the main build image FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef -ARG TARGETOS TARGETARCH BUILDPLATFORM +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED WORKDIR /app # Planner stage FROM --platform=${BUILDPLATFORM} chef AS planner -ARG TARGETOS TARGETARCH BUILDPLATFORM +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED COPY . . RUN cargo chef prepare --recipe-path recipe.json # Builder stage FROM --platform=${BUILDPLATFORM} chef AS builder -ARG TARGETOS TARGETARCH BUILDPLATFORM +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED COPY --from=planner /app/recipe.json recipe.json COPY . . @@ -34,30 +34,45 @@ RUN apt update && apt install -y unzip curl ca-certificates && \ # Build the application RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ - # We're on x64, cross-compiling for arm64 - get OpenSSL and zlib for arm64, and set up the GCC vars + # We're on x64, cross-compiling for arm64 rustup target add aarch64-unknown-linux-gnu && \ - #dpkg --add-architecture arm64 && \ apt update && \ - apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ + apt install -y gcc-aarch64-linux-gnu && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture arm64 && \ + apt update && \ + apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ + export PKG_CONFIG_ALLOW_CROSS="true" && \ + export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ + export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu && \ + export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu && \ + FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ + fi && \ TARGET="aarch64-unknown-linux-gnu" && \ TARGET_FLAG="--target=${TARGET}" && \ - # export PKG_CONFIG_ALLOW_CROSS="true" && \ - # export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-gcc" && \ - export RUSTFLAGS="-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))" && \ - FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ + export RUSTFLAGS="-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))"; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ - # We're on arm64, cross-compiling for x64 - get OpenSSL and zlib for x64, and set up the GCC vars - dpkg --add-architecture amd64 && \ - apt update && \ - apt install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 && \ + # We're on arm64, cross-compiling for x64 rustup target add x86_64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-x86-64-linux-gnu && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture amd64 && \ + apt update && \ + apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ + export PKG_CONFIG_ALLOW_CROSS="true" && \ + export PKG_CONFIG_LIBDIR="/usr/lib/x86_64-linux-gnu/pkgconfig" && \ + export 
OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu && \ + export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu && \ + FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ + fi && \ TARGET="x86_64-unknown-linux-gnu" && \ TARGET_FLAG="--target=${TARGET}" && \ - export PKG_CONFIG_ALLOW_CROSS="true" && \ - export PKG_CONFIG_PATH="/usr/lib/x86_64-linux-gnu/pkgconfig"; \ export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-gcc"; \ - export RUSTFLAGS="-L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ + export RUSTFLAGS="-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ fi && \ # Build the signer - general setup that works with or without cross-compilation # cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ From de09415b8fd994f1b74ed772787aabfd4ac52234 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 13:13:55 -0400 Subject: [PATCH 05/35] Fixed the vendored build arg --- provisioning/signer.Dockerfile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 3c29075d..984ba9b4 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -46,7 +46,8 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ export PKG_CONFIG_ALLOW_CROSS="true" && \ export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu && \ - export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu && \ + export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu; \ + else \ FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ fi && \ TARGET="aarch64-unknown-linux-gnu" && \ @@ -66,7 +67,8 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ export PKG_CONFIG_ALLOW_CROSS="true" && \ export PKG_CONFIG_LIBDIR="/usr/lib/x86_64-linux-gnu/pkgconfig" && \ export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu && \ - export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu && \ + export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu; \ + else \ FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ fi && \ TARGET="x86_64-unknown-linux-gnu" && \ From 3aee63d1a00c70fce4e86a1a1600f134a2437b41 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 15:35:58 -0400 Subject: [PATCH 06/35] Reintroduced the cargo chef setup --- provisioning/signer.Dockerfile | 104 ++++++++++++++++++++------------- 1 file changed, 63 insertions(+), 41 deletions(-) diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 984ba9b4..6de707f0 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -12,72 +12,94 @@ RUN cargo chef prepare --recipe-path recipe.json # Builder stage FROM --platform=${BUILDPLATFORM} chef AS builder ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED +ENV BUILD_VAR_SCRIPT=/tmp/env.sh COPY --from=planner /app/recipe.json recipe.json -COPY . . 
- -# Get the latest Protoc since the one in the Debian repo is incredibly old -RUN apt update && apt install -y unzip curl ca-certificates && \ - PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ - if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ - PROTOC_ARCH=x86_64; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ - PROTOC_ARCH=aarch_64; \ - else \ - echo "${BUILDPLATFORM} is not supported."; \ - exit 1; \ - fi && \ - curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ - unzip -q protoc.zip bin/protoc -d /usr && \ - unzip -q protoc.zip "include/google/*" -d /usr && \ - chmod a+x /usr/bin/protoc && \ - rm -rf protoc.zip -# Build the application +# Set up the build environment for cross-compilation if needed RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # We're on x64, cross-compiling for arm64 rustup target add aarch64-unknown-linux-gnu && \ apt update && \ apt install -y gcc-aarch64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ if [ "$OPENSSL_VENDORED" != "true" ]; then \ # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation dpkg --add-architecture arm64 && \ apt update && \ apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ - export PKG_CONFIG_ALLOW_CROSS="true" && \ - export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ - export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu && \ - export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu; \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ else \ - FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ - fi && \ - TARGET="aarch64-unknown-linux-gnu" && \ - TARGET_FLAG="--target=${TARGET}" && \ - export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-gcc" && \ - export RUSTFLAGS="-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))"; \ + echo "export FEATURE_OPENSSL_VENDORED='--features openssl-vendored'" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ # We're on arm64, cross-compiling for x64 rustup target add x86_64-unknown-linux-gnu && \ apt update && \ apt install -y gcc-x86-64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname 
$(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ if [ "$OPENSSL_VENDORED" != "true" ]; then \ # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation dpkg --add-architecture amd64 && \ apt update && \ apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ - export PKG_CONFIG_ALLOW_CROSS="true" && \ - export PKG_CONFIG_LIBDIR="/usr/lib/x86_64-linux-gnu/pkgconfig" && \ - export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu && \ - export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu; \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ else \ - FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ - fi && \ - TARGET="x86_64-unknown-linux-gnu" && \ - TARGET_FLAG="--target=${TARGET}" && \ - export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-gcc"; \ - export RUSTFLAGS="-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ + echo "export FEATURE_OPENSSL_VENDORED='--features openssl-vendored'" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + fi + +# Run cook to prep the build +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + source ${BUILD_VAR_SCRIPT}; \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} + +# Now we can copy the source files +COPY . . + +# Get the latest Protoc since the one in the Debian repo is incredibly old +RUN apt update && apt install -y unzip curl ca-certificates && \ + PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ + if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ + PROTOC_ARCH=x86_64; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ + PROTOC_ARCH=aarch_64; \ + else \ + echo "${BUILDPLATFORM} is not supported."; \ + exit 1; \ + fi && \ + curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ + unzip -q protoc.zip bin/protoc -d /usr && \ + unzip -q protoc.zip "include/google/*" -d /usr && \ + chmod a+x /usr/bin/protoc && \ + rm -rf protoc.zip + +# Build the application +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + chmod +x ${BUILD_VAR_SCRIPT} && \ + . ${BUILD_VAR_SCRIPT}; \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ fi && \ - # Build the signer - general setup that works with or without cross-compilation - # cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ export GIT_HASH=$(git rev-parse HEAD) && \ cargo build ${TARGET_FLAG} --release --bin commit-boost-signer ${FEATURE_OPENSSL_VENDORED} && \ if [ ! 
-z "$TARGET" ]; then \ From c07c71784ee4c557f8fb778f9db2ef0b328624ae Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 16:36:57 -0400 Subject: [PATCH 07/35] Ported the cross-compilation stuff into PBS --- provisioning/pbs.Dockerfile | 112 ++++++++++++++++++++++++++++++--- provisioning/signer.Dockerfile | 22 ++++--- 2 files changed, 120 insertions(+), 14 deletions(-) diff --git a/provisioning/pbs.Dockerfile b/provisioning/pbs.Dockerfile index 200c95d2..cac14de0 100644 --- a/provisioning/pbs.Dockerfile +++ b/provisioning/pbs.Dockerfile @@ -1,22 +1,120 @@ -FROM lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +# This will be the main build image +FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED WORKDIR /app -FROM chef AS planner +FROM --platform=${BUILDPLATFORM} chef AS planner +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED COPY . . RUN cargo chef prepare --recipe-path recipe.json -FROM chef AS builder +FROM --platform=${BUILDPLATFORM} chef AS builder +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED +ENV BUILD_VAR_SCRIPT=/tmp/env.sh COPY --from=planner /app/recipe.json recipe.json -RUN cargo chef cook --release --recipe-path recipe.json +# Set up the build environment for cross-compilation if needed +RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ + # We're on x64, cross-compiling for arm64 + rustup target add aarch64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-aarch64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture arm64 && \ + apt update && \ + apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ + # We're on arm64, cross-compiling for x64 + rustup target add x86_64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-x86-64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture amd64 && \ + 
apt update && \ + apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + fi -RUN apt-get update && apt-get install -y protobuf-compiler +# Run cook to prep the build +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + . ${BUILD_VAR_SCRIPT} && \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} +# Now we can copy the source files - chef cook wants to run before this step COPY . . -RUN cargo build --release --bin commit-boost-pbs +# Get the latest Protoc since the one in the Debian repo is incredibly old +RUN apt update && apt install -y unzip curl ca-certificates && \ + PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ + if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ + PROTOC_ARCH=x86_64; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ + PROTOC_ARCH=aarch_64; \ + else \ + echo "${BUILDPLATFORM} is not supported."; \ + exit 1; \ + fi && \ + curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ + unzip -q protoc.zip bin/protoc -d /usr && \ + unzip -q protoc.zip "include/google/*" -d /usr && \ + chmod a+x /usr/bin/protoc && \ + rm -rf protoc.zip -FROM debian:bookworm-20240904-slim AS runtime +# Build the application +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + chmod +x ${BUILD_VAR_SCRIPT} && \ + . ${BUILD_VAR_SCRIPT} && \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo build ${TARGET_FLAG} --release --bin commit-boost-pbs ${FEATURE_OPENSSL_VENDORED} && \ + if [ ! 
-z "$TARGET" ]; then \ + # If we're cross-compiling, we need to move the binary out of the target dir + mv target/${TARGET}/release/commit-boost-pbs target/release/commit-boost-pbs; \ + fi + +# Assemble the runner image +FROM debian:bookworm-slim AS runtime WORKDIR /app RUN apt-get update && apt-get install -y \ diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 6de707f0..354afee0 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -35,8 +35,6 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - else \ - echo "export FEATURE_OPENSSL_VENDORED='--features openssl-vendored'" >> ${BUILD_VAR_SCRIPT}; \ fi; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ # We're on arm64, cross-compiling for x64 @@ -57,22 +55,26 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - else \ - echo "export FEATURE_OPENSSL_VENDORED='--features openssl-vendored'" >> ${BUILD_VAR_SCRIPT}; \ fi; \ fi # Run cook to prep the build RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - source ${BUILD_VAR_SCRIPT}; \ + . ${BUILD_VAR_SCRIPT} && \ echo "Cross-compilation environment set up for ${TARGET}"; \ else \ echo "No cross-compilation needed"; \ fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ export GIT_HASH=$(git rev-parse HEAD) && \ cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} -# Now we can copy the source files +# Now we can copy the source files - chef cook wants to run before this step COPY . . # Get the latest Protoc since the one in the Debian repo is incredibly old @@ -95,11 +97,17 @@ RUN apt update && apt install -y unzip curl ca-certificates && \ # Build the application RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ chmod +x ${BUILD_VAR_SCRIPT} && \ - . ${BUILD_VAR_SCRIPT}; \ + . ${BUILD_VAR_SCRIPT} && \ echo "Cross-compilation environment set up for ${TARGET}"; \ else \ echo "No cross-compilation needed"; \ fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ export GIT_HASH=$(git rev-parse HEAD) && \ cargo build ${TARGET_FLAG} --release --bin commit-boost-signer ${FEATURE_OPENSSL_VENDORED} && \ if [ ! 
-z "$TARGET" ]; then \ From 699b7ec9eeb4fe2c5d1398095047b82df81afc26 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 7 May 2025 13:52:08 -0400 Subject: [PATCH 08/35] Split the dockerfiles into separate builder / image definitions --- .gitignore | 3 +- build-linux.sh | 144 +++++++++++++++++++++++++++++++++ provisioning/build.Dockerfile | 120 +++++++++++++++++++++++++++ provisioning/cli.Dockerfile | 0 provisioning/pbs.Dockerfile | 137 +++---------------------------- provisioning/signer.Dockerfile | 6 +- 6 files changed, 277 insertions(+), 133 deletions(-) create mode 100755 build-linux.sh create mode 100644 provisioning/build.Dockerfile create mode 100644 provisioning/cli.Dockerfile diff --git a/.gitignore b/.gitignore index b8eaa77a..e48792b4 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ # will have compiled files and executables debug/ target/ +build/ # These are backup files generated by rustfmt **/*.rs.bk @@ -14,4 +15,4 @@ cb.docker-compose.yml targets.json .idea/ logs -.vscode/ \ No newline at end of file +.vscode/ diff --git a/build-linux.sh b/build-linux.sh new file mode 100755 index 00000000..a7266bd9 --- /dev/null +++ b/build-linux.sh @@ -0,0 +1,144 @@ +#!/bin/bash + +# This script will build the Commit-Boost applications and modules for local Linux development. + +# ================= +# === Functions === +# ================= + +# Print a failure message to stderr and exit +fail() { + MESSAGE=$1 + RED='\033[0;31m' + RESET='\033[;0m' + >&2 echo -e "\n${RED}**ERROR**\n$MESSAGE${RESET}\n" + exit 1 +} + + +# Builds the CLI binaries for Linux +# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static +build_cli() { + echo "Building CLI binaries..." + docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-cli . || fail "Error building CLI." + echo "done!" + + # Flatten the folder structure for easier referencing + mv build/$VERSION/linux_amd64/commit-boost-cli build/$VERSION/commit-boost-cli-linux-amd64 + mv build/$VERSION/linux_arm64/commit-boost-cli build/$VERSION/commit-boost-cli-linux-arm64 + + # Clean up the empty directories + rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 + echo "done!" +} + + +# Builds the PBS module binaries for Linux and the Docker image(s) +# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static +build_pbs() { + echo "Building PBS binaries..." + docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-pbs . || fail "Error building PBS binaries." + echo "done!" + + # Flatten the folder structure for easier referencing + mv build/$VERSION/linux_amd64/commit-boost-pbs build/$VERSION/commit-boost-pbs-linux-amd64 + mv build/$VERSION/linux_arm64/commit-boost-pbs build/$VERSION/commit-boost-pbs-linux-arm64 + + # Clean up the empty directories + rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 + + echo "Building PBS Docker image..." + # If uploading, make and push a manifest + if [ "$LOCAL_UPLOAD" = true ]; then + if [ -z "$LOCAL_DOCKER_REGISTRY" ]; then + fail "LOCAL_DOCKER_REGISTRY must be set to upload to a local registry." + fi + docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/$VERSION -t $LOCAL_DOCKER_REGISTRY/commit-boost/pbs:$VERSION -f provisioning/pbs.Dockerfile --push . 
|| fail "Error building PBS image." + else + docker buildx build --rm --load --build-arg BINARIES_PATH=build/$VERSION -t commit-boost/pbs:$VERSION -f provisioning/pbs.Dockerfile . || fail "Error building PBS image." + fi + echo "done!" +} + + +# Builds the Signer module binaries for Linux and the Docker image(s) +# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static +build_signer() { + echo "Building Signer binaries..." + docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-signer . || fail "Error building Signer binaries." + echo "done!" + + # Flatten the folder structure for easier referencing + mv build/$VERSION/linux_amd64/commit-boost-signer build/$VERSION/commit-boost-signer-linux-amd64 + mv build/$VERSION/linux_arm64/commit-boost-signer build/$VERSION/commit-boost-signer-linux-arm64 + + # Clean up the empty directories + rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 + + echo "Building Signer Docker image..." + # If uploading, make and push a manifest + if [ "$LOCAL_UPLOAD" = true ]; then + if [ -z "$LOCAL_DOCKER_REGISTRY" ]; then + fail "LOCAL_DOCKER_REGISTRY must be set to upload to a local registry." + fi + docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/$VERSION -t $LOCAL_DOCKER_REGISTRY/commit-boost/signer:$VERSION -f provisioning/signer.Dockerfile --push . || fail "Error building Signer image." + else + docker buildx build --rm --load --build-arg BINARIES_PATH=build/$VERSION -t commit-boost/signer:$VERSION -f provisioning/signer.Dockerfile . || fail "Error building Signer image." + fi + echo "done!" +} + + +# Print usage +usage() { + echo "Usage: build.sh [options] -v " + echo "This script assumes it is in the commit-boost-client repository directory." 
+ echo "Options:" + echo $'\t-a\tBuild all of the artifacts (CLI, PBS, and Signer, along with Docker images)' + echo $'\t-c\tBuild the Commit-Boost CLI binaries' + echo $'\t-p\tBuild the PBS module binary and its Docker container' + echo $'\t-s\tBuild the Signer module binary and its Docker container' + echo $'\t-o\tWhen passed with a build, upload the resulting image tags to a local Docker registry specified in $LOCAL_DOCKER_REGISTRY' + exit 0 +} + + +# ================= +# === Main Body === +# ================= + +# Parse arguments +while getopts "acpsov:" FLAG; do + case "$FLAG" in + a) CLI=true PBS=true SIGNER=true ;; + c) CLI=true ;; + p) PBS=true ;; + s) SIGNER=true ;; + o) LOCAL_UPLOAD=true ;; + v) VERSION="$OPTARG" ;; + *) usage ;; + esac +done +if [ -z "$VERSION" ]; then + usage +fi + +# Cleanup old artifacts +rm -rf build/$VERSION/* +mkdir -p build/$VERSION + +# Make a multiarch builder, ignore if it's already there +docker buildx create --name multiarch-builder --driver docker-container --use > /dev/null 2>&1 +# NOTE: if using a local repo with a private CA, you will have to follow these steps to add the CA to the builder: +# https://stackoverflow.com/a/73585243 + +# Build the artifacts +if [ "$CLI" = true ]; then + build_cli +fi +if [ "$PBS" = true ]; then + build_pbs +fi +if [ "$SIGNER" = true ]; then + build_signer +fi diff --git a/provisioning/build.Dockerfile b/provisioning/build.Dockerfile new file mode 100644 index 00000000..83679ed5 --- /dev/null +++ b/provisioning/build.Dockerfile @@ -0,0 +1,120 @@ +# This will be the main build image +FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +WORKDIR /app + +FROM --platform=${BUILDPLATFORM} chef AS planner +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +COPY . . 
+RUN cargo chef prepare --recipe-path recipe.json + +FROM --platform=${BUILDPLATFORM} chef AS builder +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +RUN test -n "$TARGET_CRATE" || (echo "TARGET_CRATE must be set to the service / binary you want to build" && false) +ENV BUILD_VAR_SCRIPT=/tmp/env.sh +COPY --from=planner /app/recipe.json recipe.json + +# Get the latest Protoc since the one in the Debian repo is incredibly old +RUN apt update && apt install -y unzip curl ca-certificates && \ + PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ + if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ + PROTOC_ARCH=x86_64; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ + PROTOC_ARCH=aarch_64; \ + else \ + echo "${BUILDPLATFORM} is not supported."; \ + exit 1; \ + fi && \ + curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ + unzip -q protoc.zip bin/protoc -d /usr && \ + unzip -q protoc.zip "include/google/*" -d /usr && \ + chmod a+x /usr/bin/protoc && \ + rm -rf protoc.zip + +# Set up the build environment for cross-compilation if needed +RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ + # We're on x64, cross-compiling for arm64 + rustup target add aarch64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-aarch64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture arm64 && \ + apt update && \ + apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ + # We're on arm64, cross-compiling for x64 + rustup target add x86_64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-x86-64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture amd64 && \ + apt update && \ + apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ + echo "export 
PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + fi + +# Run cook to prep the build +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + . ${BUILD_VAR_SCRIPT} && \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} + +# Now we can copy the source files - chef cook wants to run before this step +COPY . . + +# Build the application +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + chmod +x ${BUILD_VAR_SCRIPT} && \ + . ${BUILD_VAR_SCRIPT} && \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo build ${TARGET_FLAG} --release --bin ${TARGET_CRATE} ${FEATURE_OPENSSL_VENDORED} && \ + if [ ! -z "$TARGET" ]; then \ + # If we're cross-compiling, we need to move the binary out of the target dir + mv target/${TARGET}/release/${TARGET_CRATE} target/release/${TARGET_CRATE}; \ + fi + +# Copy the output +FROM scratch AS output +ARG TARGET_CRATE +COPY --from=builder /app/target/release/${TARGET_CRATE} /${TARGET_CRATE} diff --git a/provisioning/cli.Dockerfile b/provisioning/cli.Dockerfile new file mode 100644 index 00000000..e69de29b diff --git a/provisioning/pbs.Dockerfile b/provisioning/pbs.Dockerfile index cac14de0..9eff5890 100644 --- a/provisioning/pbs.Dockerfile +++ b/provisioning/pbs.Dockerfile @@ -1,138 +1,19 @@ -# This will be the main build image -FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -WORKDIR /app - -FROM --platform=${BUILDPLATFORM} chef AS planner -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -COPY . . 
-RUN cargo chef prepare --recipe-path recipe.json - -FROM --platform=${BUILDPLATFORM} chef AS builder -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -ENV BUILD_VAR_SCRIPT=/tmp/env.sh -COPY --from=planner /app/recipe.json recipe.json - -# Set up the build environment for cross-compilation if needed -RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ - # We're on x64, cross-compiling for arm64 - rustup target add aarch64-unknown-linux-gnu && \ - apt update && \ - apt install -y gcc-aarch64-linux-gnu && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ - echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture arm64 && \ - apt update && \ - apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ - # We're on arm64, cross-compiling for x64 - rustup target add x86_64-unknown-linux-gnu && \ - apt update && \ - apt install -y gcc-x86-64-linux-gnu && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ - echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture amd64 && \ - apt update && \ - apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ - fi - -# Run cook to prep the build -RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - . 
${BUILD_VAR_SCRIPT} && \ - echo "Cross-compilation environment set up for ${TARGET}"; \ - else \ - echo "No cross-compilation needed"; \ - fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ - cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} - -# Now we can copy the source files - chef cook wants to run before this step -COPY . . - -# Get the latest Protoc since the one in the Debian repo is incredibly old -RUN apt update && apt install -y unzip curl ca-certificates && \ - PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ - if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ - PROTOC_ARCH=x86_64; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ - PROTOC_ARCH=aarch_64; \ - else \ - echo "${BUILDPLATFORM} is not supported."; \ - exit 1; \ - fi && \ - curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ - unzip -q protoc.zip bin/protoc -d /usr && \ - unzip -q protoc.zip "include/google/*" -d /usr && \ - chmod a+x /usr/bin/protoc && \ - rm -rf protoc.zip - -# Build the application -RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - chmod +x ${BUILD_VAR_SCRIPT} && \ - . ${BUILD_VAR_SCRIPT} && \ - echo "Cross-compilation environment set up for ${TARGET}"; \ - else \ - echo "No cross-compilation needed"; \ - fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ - cargo build ${TARGET_FLAG} --release --bin commit-boost-pbs ${FEATURE_OPENSSL_VENDORED} && \ - if [ ! 
-z "$TARGET" ]; then \ - # If we're cross-compiling, we need to move the binary out of the target dir - mv target/${TARGET}/release/commit-boost-pbs target/release/commit-boost-pbs; \ - fi - -# Assemble the runner image -FROM debian:bookworm-slim AS runtime -WORKDIR /app - +FROM debian:bookworm-slim +ARG BINARIES_PATH TARGETOS TARGETARCH +COPY ${BINARIES_PATH}/commit-boost-pbs-${TARGETOS}-${TARGETARCH} /usr/local/bin RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ libssl3 \ libssl-dev \ - curl \ - && apt-get clean autoclean \ - && rm -rf /var/lib/apt/lists/* - -COPY --from=builder /app/target/release/commit-boost-pbs /usr/local/bin + curl && \ + # Cleanup + apt-get clean autoclean && \ + rm -rf /var/lib/apt/lists/* +# Create a non-root user to run the application RUN groupadd -g 10001 commitboost && \ useradd -u 10001 -g commitboost -s /sbin/nologin commitboost USER commitboost -ENTRYPOINT ["/usr/local/bin/commit-boost-pbs"] - - - +ENTRYPOINT ["/usr/local/bin/commit-boost-pbs"] \ No newline at end of file diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 354afee0..6c5ac045 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -115,6 +115,7 @@ RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ mv target/${TARGET}/release/commit-boost-signer target/release/commit-boost-signer; \ fi +# Assemble the runner image FROM debian:bookworm-slim AS runtime WORKDIR /app @@ -133,7 +134,4 @@ RUN groupadd -g 10001 commitboost && \ useradd -u 10001 -g commitboost -s /sbin/nologin commitboost USER commitboost -ENTRYPOINT ["/usr/local/bin/commit-boost-signer"] - - - +ENTRYPOINT ["/usr/local/bin/commit-boost-signer"] \ No newline at end of file From 7165f129ae7a299b69649c7904ef3b30787ee86e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 7 May 2025 17:48:42 -0400 Subject: [PATCH 09/35] Added a build guide --- docs/docs/get_started/building.md | 185 ++++++++++++++++++++++++++++++ 1 file changed, 185 insertions(+) create mode 100644 docs/docs/get_started/building.md diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md new file mode 100644 index 00000000..d38b447f --- /dev/null +++ b/docs/docs/get_started/building.md @@ -0,0 +1,185 @@ +# Building Commit-Boost from Source + +Commit-Boost's components are all written in [Rust](https://www.rust-lang.org/). This guide will walk you through the setup required to build them from source. It assumes you are on a Debian or Debian-based system (e.g., Ubuntu, Linux Mint, Pop OS). For other systems, please adapt the steps for your system's package manager accordingly. + + +## Building via the Docker Builder + +For convenience, Commit-Boost has Dockerized the build environment for Linux `x64` and `arm64` platforms. All of the prerequisites, cross-compilation tooling, and configuration are handled by the builder image. If you would like to build the CLI, PBS module, or Signer binaries and Docker images from source, you are welcome to use the Docker builder process. + +To use the builder, you will need to have [Docker Engine](https://docs.docker.com/engine/install/) installed on your system. Please follow the instructions to install it first. + +:::note +The build script assumes that you've added your user account to the `docker` group with the Linux [post-install steps](https://docs.docker.com/engine/install/linux-postinstall/). 
If you haven't, then you'll need to run the build script below as `root` or modify it so each call to `docker` within it is run as the root user (e.g., with `sudo`). +::: + +We provide a build script called `build-linux.sh` to automate the process: + +``` +$ ./build-linux.sh +Usage: build.sh [options] -v +This script assumes it is in the commit-boost-client repository directory. +Options: + -a Build all of the artifacts (CLI, PBS, and Signer, along with Docker images) + -c Build the Commit-Boost CLI binaries + -p Build the PBS module binary and its Docker container + -s Build the Signer module binary and its Docker container + -o When passed with a build, upload the resulting image tags to a local Docker registry specified in $LOCAL_DOCKER_REGISTRY +``` + +The script utilizes Docker's [buildx](https://docs.docker.com/reference/cli/docker/buildx/) system to both create a multiarch-capable builder and cross-compile for both Linux architectures. You are free to modify it to produce only the artifacts relevant to you if so desired. + +The `version` provided will be used to house the output binaries in `./build/$VERSION`, and act as the version tag for the Docker images when they're added to your local system or uploaded to your local Docker repository. + + +## Building Manually + +If you don't want to use the Docker builder, you can compile the Commit-Boost artifacts locally. The following instructions assume a Debian or Debian-based system (e.g., Ubuntu, Linux Mint, Pop OS) for simplicity. For other systems, please adapt any relevant instructions to your environment accordingly. + + +### Prerequisites + +Requirements: + +- Rust 1.83+ +- GCC (or another C compiler of your choice) +- OpenSSL development libraries +- Protobuf Compiler (`protoc`) + +Start by installing Rust if you don't already have it. Follow [the official directions](https://www.rust-lang.org/learn/get-started) to install it and bring it up to date. + +Install the dependencies: + +```bash +sudo apt update && sudo apt install -y openssl ca-certificates libssl3 libssl-dev build-essential pkg-config curl +``` + +Install the Protobuf compiler: + +:::note +While many package repositories provide a `protobuf-compiler` package in lieu of manually installing protoc, we've found at the time of this writing that most of them use v3.21 which is quite out of date. We recommend getting the latest version manually. +::: + +```bash +PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') +MACHINE_ARCH=$(uname -m) +case "${MACHINE_ARCH}" in + aarch64) PROTOC_ARCH=aarch_64;; + x86_64) PROTOC_ARCH=x86_64;; + *) echo "${MACHINE_ARCH} is not supported."; exit 1;; +esac +curl -sLo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip +sudo unzip -q protoc.zip bin/protoc -d /usr +sudo unzip -q protoc.zip "include/google/*" -d /usr +sudo chmod a+x /usr/bin/protoc +rm -rf protoc.zip +``` + +With the prerequisites set up, pull the repository: +```bash +git clone https://github.com/Commit-Boost/commit-boost-client +``` + +Check out the `stable` branch which houses the latest release: +```bash +cd commit-boost-client && git checkout stable +``` + +Finally, update the submodules: +``` +git submodule update --init --recursive +``` + +Your build environment should now be ready to use. 
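+
+As a quick sanity check (optional, and the exact version numbers will drift over time), confirm the toolchain is visible before building:
+
+```bash
+rustc --version    # should report 1.83 or newer
+cargo --version
+protoc --version   # should report the release installed above, not the distro's older 3.x package
+gcc --version
+```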
+ + +### Building the CLI + +To build the CLI, run: +``` +cargo build --release --bin commit-boost-cli +``` + +This will create a binary in `./target/release/commit-boost-cli`. Confirm that it works: +``` +./target/release/commit-boost-cli --version +``` + +You can now use this to generate the Docker Compose file to drive the other modules if desired. See the [configuration](./configuration.md) guide for more information. + + +### Building the PBS Module + +To build PBS, run: +``` +cargo build --release --bin commit-boost-pbs +``` + +This will create a binary in `./target/release/commit-boost-pbs`. To verify it works, create [a TOML configuration](./configuration.md) for the PBS module (e.g., `cb-config.toml`). + +As a quick example, we'll use this configuration that connects to the Flashbots relay on the Hoodi network: +```toml +chain = "Hoodi" + +[pbs] +port = 18550 +with_signer = true + +[[relays]] +url = "https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-hoodi.flashbots.net" + +[metrics] +enabled = true + +[signer] +[signer.local.loader] +format = "lighthouse" +keys_path = "/tmp/keys" +secrets_path = "/tmp/secrets" +``` + +Set the path to it in the `CB_CONFIG` environment variable and run the binary: +``` +CB_CONFIG=cb-config.toml ./target/release/commit-boost-pbs +``` + +If it works, you should see output like this: +``` +2025-05-07T21:09:17.407245Z WARN No metrics server configured +2025-05-07T21:09:17.407257Z INFO starting PBS service version="0.7.0" commit_hash="58082edb1213596667afe8c3950cd997ab85f4f3" addr=127.0.0.1:18550 events_subs=0 chain=Hoodi +2025-05-07T21:09:17.746855Z INFO : new request ua="" relay_check=true method=/eth/v1/builder/status req_id=5c405c33-0496-42ea-a35d-a7a01dbba356 +2025-05-07T21:09:17.896196Z INFO : relay check successful method=/eth/v1/builder/status req_id=5c405c33-0496-42ea-a35d-a7a01dbba356 +``` + +If you do, then the binary works. + + +### Building the Signer Module + +To build the Signer, run: +``` +cargo build --release --bin commit-boost-signer +``` + +This will create a binary in `./target/release/commit-boost-signer`. To verify it works, create [a TOML configuration](./configuration.md) for the Signer module (e.g., `cb-config.toml`). We'll use the example in the PBS build section above. + +The signer needs the following environment variables set: +- `CB_CONFIG` = path of your config file. +- `CB_JWTS` = a dummy key-value pair of [JWT](https://en.wikipedia.org/wiki/JSON_Web_Token) values for various services. Since we don't need them for the sake of just testing the binary, we can use something like `"test_jwts=dummy"`. +- `CB_SIGNER_PORT` = the network port to listen for signer requests on. Default is `20000`. + +Set these values, create the `keys` and `secrets` directories listed in the configuration file, and run the binary: + +``` +mkdir -p /tmp/keys && mkdir -p /tmp/secrets +CB_CONFIG=cb-config.toml CB_JWTS="test_jwts=dummy" CB_SIGNER_PORT=20000 ./target/release/commit-boost-signer +``` + +You should see output like this: +``` +2025-05-07T21:43:46.385535Z WARN Proxy store not configured. Proxies keys and delegations will not be persisted +2025-05-07T21:43:46.393507Z INFO Starting signing service version="0.7.0" commit_hash="58082edb1213596667afe8c3950cd997ab85f4f3" modules=["test_jwts"] port=20000 loaded_consensus=0 loaded_proxies=0 +2025-05-07T21:43:46.393574Z WARN No metrics server configured +``` + +If you do, then the binary works. 
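+
+If you plan to use all three components, you can also compile them in a single pass rather than one crate at a time; this is purely a convenience and produces the same binaries as the individual commands above:
+```
+cargo build --release --bin commit-boost-cli --bin commit-boost-pbs --bin commit-boost-signer
+```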
\ No newline at end of file From 9438dae97bbb5d13032519c34ca9ad4e7c468137 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 13 May 2025 02:25:24 -0400 Subject: [PATCH 10/35] Refactored the Github release action to use the Docker builder --- .github/workflows/release.yml | 159 ++++++++++++++++++++++++++------- provisioning/build.Dockerfile | 21 +---- provisioning/cli.Dockerfile | 0 provisioning/protoc.sh | 57 ++++++++++++ provisioning/signer.Dockerfile | 134 ++------------------------- 5 files changed, 194 insertions(+), 177 deletions(-) delete mode 100644 provisioning/cli.Dockerfile create mode 100755 provisioning/protoc.sh diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 906c01f3..40745fbb 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,11 +10,86 @@ permissions: packages: write jobs: - build-binaries: + # Builds the x64 and arm64 binaries for Linux, for all 3 crates, via the Docker builder + build-binaries-linux: strategy: matrix: target: - - x86_64-unknown-linux-gnu + - amd64 + - arm64 + name: + - commit-boost-cli + - commit-boost-pbs + - commit-boost-signer + include: + - target: amd64 + package-suffix: x86-64 + - target: arm64 + package-suffix: arm64 + - name: commit-boost-cli + target-crate: cli + - name: commit-boost-pbs + target-crate: pbs + - name: commit-boost-signer + target-crate: signer + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: "stable" + fetch-depth: 0 + submodules: true + + - name: Log commit hash + run: | + echo "Releasing commit: $(git rev-parse HEAD)" + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build binary (Linux) + uses: docker/build-push-action@v6 + with: + context: . 
+ push: false + platforms: linux/amd64,linux/arm64 + cache-from: type=registry,ref=ghcr.io/commit-boost/buildcache:${{ matrix.target-crate}} + cache-to: type=registry,ref=ghcr.io/commit-boost/buildcache:${{ matrix.target-crate }},mode=max + file: provisioning/build.Dockerfile + outputs: type=local,dest=build + build-args: | + TARGET_CRATE=${{ matrix.name }} + + - name: Package binary (Linux) + run: | + cd build/linux_${{ matrix.target }} + tar -czvf ${{ matrix.name }}-${{ github.ref_name }}-linux_${{ matrix.package-suffix }}.tar.gz ${{ matrix.name }} + mv ${{ matrix.name }}-${{ github.ref_name }}-linux_${{ matrix.package-suffix }}.tar.gz ../../ + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.name }}-${{ github.ref_name }}-linux_${{ matrix.package-suffix }} + path: | + ${{ matrix.name }}-${{ github.ref_name }}-linux_${{ matrix.package-suffix }}.tar.gz + + # Builds the arm64 binaries for Darwin, for all 3 crates, natively + build-binaries-darwin: + strategy: + matrix: + target: + # x64 requires macos-latest-large which is not available in the free tier # - x86_64-apple-darwin - aarch64-apple-darwin name: @@ -22,10 +97,8 @@ jobs: - commit-boost-pbs - commit-boost-signer include: - - target: x86_64-unknown-linux-gnu - os: ubuntu-latest # - target: x86_64-apple-darwin - # os: macos-latest + # os: macos-latest-large - target: aarch64-apple-darwin os: macos-latest runs-on: ${{ matrix.os }} @@ -41,6 +114,12 @@ jobs: run: | echo "Releasing commit: $(git rev-parse HEAD)" + - name: Install Protoc + run: + # Brew's version is much more up to date than the Linux ones, and installing the latest via script runs into curl issues so for now, brew's easier to use + # provisioning/protoc.sh + brew install protobuf + - name: Cache Cargo registry uses: actions/cache@v3 with: @@ -63,48 +142,25 @@ jobs: ${{ runner.os }}-cargo-build-${{ matrix.target }}- ${{ runner.os }}-cargo-build- - - name: Install protoc (Ubuntu) - if: runner.os == 'Linux' - run: sudo apt-get install protobuf-compiler - - - name: Install protoc (macOS) - if: runner.os == 'macOS' - run: brew install protobuf - - - name: Set up Rust - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - override: true - target: ${{ matrix.target }} - - - name: Build binary + - name: Build binary (Darwin) run: cargo build --release --target ${{ matrix.target }} --bin ${{ matrix.name }} - env: - CARGO_TARGET_X86_64_PC_WINDOWS_GNU_LINKER: gcc - name: Package binary (Unix) - if: runner.os != 'Windows' run: | cd target/${{ matrix.target }}/release tar -czvf ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz ${{ matrix.name }} mv ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz ../../../ - - name: Package binary (Windows) - if: runner.os == 'Windows' - run: | - 7z a ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.zip target\${{ matrix.target }}\release\${{ matrix.name }}.exe - - name: Upload artifact uses: actions/upload-artifact@v4 with: name: ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }} path: | - ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.${{ runner.os == 'Windows' && 'zip' || 'tar.gz' }} + ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz + # Builds the PBS Docker image build-and-push-pbs-docker: - needs: [build-binaries] + needs: [build-binaries-linux] runs-on: ubuntu-latest steps: - name: Checkout code @@ -114,6 +170,20 @@ jobs: fetch-depth: 0 submodules: true + - name: Download 
binary archives + uses: actions/download-artifact@v4 + with: + path: ./artifacts + pattern: "commit-boost-*" + + - name: Extract binaries + run: | + mkdir -p ./artifacts/bin + tar -xzf ./artifacts/commit-boost-pbs-${{ github.ref_name }}-linux_x86-64/commit-boost-pbs-${{ github.ref_name }}-linux_x86-64.tar.gz -C ./artifacts/bin + mv ./artifacts/bin/commit-boost-pbs ./artifacts/bin/commit-boost-pbs-linux-amd64 + tar -xzf ./artifacts/commit-boost-pbs-${{ github.ref_name }}-linux_arm64/commit-boost-pbs-${{ github.ref_name }}-linux_arm64.tar.gz -C ./artifacts/bin + mv ./artifacts/bin/commit-boost-pbs ./artifacts/bin/commit-boost-pbs-linux-arm64 + - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -133,6 +203,8 @@ jobs: context: . push: true platforms: linux/amd64,linux/arm64 + build-args: | + BINARIES_PATH=./artifacts/bin tags: | ghcr.io/commit-boost/pbs:${{ github.ref_name }} ${{ !contains(github.ref_name, 'rc') && 'ghcr.io/commit-boost/pbs:latest' || '' }} @@ -140,8 +212,9 @@ jobs: cache-to: type=registry,ref=ghcr.io/commit-boost/pbs:buildcache,mode=max file: provisioning/pbs.Dockerfile + # Builds the Signer Docker image build-and-push-signer-docker: - needs: [build-binaries] + needs: [build-binaries-linux] runs-on: ubuntu-latest steps: - name: Checkout code @@ -151,6 +224,20 @@ jobs: fetch-depth: 0 submodules: true + - name: Download binary archives + uses: actions/download-artifact@v4 + with: + path: ./artifacts + pattern: "commit-boost-*" + + - name: Extract binaries + run: | + mkdir -p ./artifacts/bin + tar -xzf ./artifacts/commit-boost-signer-${{ github.ref_name }}-linux_x86-64/commit-boost-signer-${{ github.ref_name }}-linux_x86-64.tar.gz -C ./artifacts/bin + mv ./artifacts/bin/commit-boost-signer ./artifacts/bin/commit-boost-signer-linux-amd64 + tar -xzf ./artifacts/commit-boost-signer-${{ github.ref_name }}-linux_arm64/commit-boost-signer-${{ github.ref_name }}-linux_arm64.tar.gz -C ./artifacts/bin + mv ./artifacts/bin/commit-boost-signer ./artifacts/bin/commit-boost-signer-linux-arm64 + - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -170,6 +257,8 @@ jobs: context: . 
push: true platforms: linux/amd64,linux/arm64 + build-args: | + BINARIES_PATH=./artifacts/bin tags: | ghcr.io/commit-boost/signer:${{ github.ref_name }} ${{ !contains(github.ref_name, 'rc') && 'ghcr.io/commit-boost/signer:latest' || '' }} @@ -177,9 +266,11 @@ jobs: cache-to: type=registry,ref=ghcr.io/commit-boost/signer:buildcache,mode=max file: provisioning/signer.Dockerfile + # Creates a draft release on GitHub with the binaries finalize-release: needs: - - build-binaries + - build-binaries-linux + - build-binaries-darwin - build-and-push-pbs-docker - build-and-push-signer-docker runs-on: ubuntu-latest diff --git a/provisioning/build.Dockerfile b/provisioning/build.Dockerfile index 83679ed5..a4eb3723 100644 --- a/provisioning/build.Dockerfile +++ b/provisioning/build.Dockerfile @@ -14,23 +14,6 @@ RUN test -n "$TARGET_CRATE" || (echo "TARGET_CRATE must be set to the service / ENV BUILD_VAR_SCRIPT=/tmp/env.sh COPY --from=planner /app/recipe.json recipe.json -# Get the latest Protoc since the one in the Debian repo is incredibly old -RUN apt update && apt install -y unzip curl ca-certificates && \ - PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ - if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ - PROTOC_ARCH=x86_64; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ - PROTOC_ARCH=aarch_64; \ - else \ - echo "${BUILDPLATFORM} is not supported."; \ - exit 1; \ - fi && \ - curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ - unzip -q protoc.zip bin/protoc -d /usr && \ - unzip -q protoc.zip "include/google/*" -d /usr && \ - chmod a+x /usr/bin/protoc && \ - rm -rf protoc.zip - # Set up the build environment for cross-compilation if needed RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # We're on x64, cross-compiling for arm64 @@ -90,6 +73,10 @@ RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ export GIT_HASH=$(git rev-parse HEAD) && \ cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} +# Get the latest Protoc since the one in the Debian repo is incredibly old +COPY provisioning/protoc.sh provisioning/protoc.sh +RUN provisioning/protoc.sh + # Now we can copy the source files - chef cook wants to run before this step COPY . . diff --git a/provisioning/cli.Dockerfile b/provisioning/cli.Dockerfile deleted file mode 100644 index e69de29b..00000000 diff --git a/provisioning/protoc.sh b/provisioning/protoc.sh new file mode 100755 index 00000000..7f66a656 --- /dev/null +++ b/provisioning/protoc.sh @@ -0,0 +1,57 @@ +#!/bin/sh + +# This script installs the latest version of protoc (Protocol Buffers Compiler) from the official GitHub repository. 
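+# In this repository it is invoked from provisioning/build.Dockerfile during the builder stage; when run by
+# hand on a Debian-based system it expects root privileges (e.g. `sudo provisioning/protoc.sh`), while on
+# macOS it installs under the Homebrew prefix so no elevation is needed.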
+ +# Print a failure message to stderr and exit +fail() { + MESSAGE=$1 + RED='\033[0;31m' + RESET='\033[;0m' + >&2 echo -e "\n${RED}**ERROR**\n$MESSAGE${RESET}\n" + exit 1 +} + +# Get the OS +case "$(uname)" in + Darwin*) + PROTOC_OS="osx" ; + TARGET_DIR="/opt/homebrew" ; # Emulating a homebrew install so we don't need elevated permissions + # Darwin comes with unzip and curl already + brew install jq ;; + Linux*) + PROTOC_OS="linux" ; + TARGET_DIR="/usr" ; # Assumes the script is run as root or the user can do it manually + apt update && apt install -y unzip curl ca-certificates jq ;; + *) + echo "Unsupported OS: $(uname)" ; + exit 1 ;; +esac + +# Get the architecture +case "$(uname -m)" in + x86_64) PROTOC_ARCH="x86_64" ;; + aarch64) PROTOC_ARCH="aarch_64" ;; + arm64) PROTOC_ARCH="aarch_64" ;; + *) echo "Unsupported architecture: [$(uname -m)]"; exit 1 ;; +esac + +# Get the latest version +PROTOC_RAW_VERSION=$(curl --retry 10 --retry-delay 2 --retry-all-errors -fsL "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | jq -r .tag_name) || fail "Failed to get the latest version of protoc" +if [ "$PROTOC_RAW_VERSION" = "null" ]; then + fail "Failed to get the latest version of protoc" +fi +echo "Latest version of protoc: [$PROTOC_RAW_VERSION]" +PROTOC_VERSION=$(echo $PROTOC_RAW_VERSION | sed 's/^v//') || fail "Failed to parse the latest version of protoc" +if [ -z "$PROTOC_VERSION" ]; then + fail "Latest version of protoc was empty" +fi + +echo "Installing protoc: $PROTOC_VERSION-$PROTOC_OS-$PROTOC_ARCH" + +# Download and install protoc +curl --retry 10 --retry-delay 2 --retry-all-errors -fsLo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-$PROTOC_OS-$PROTOC_ARCH.zip || fail "Failed to download protoc" +unzip -q protoc.zip bin/protoc -d $TARGET_DIR || fail "Failed to unzip protoc" +unzip -q protoc.zip "include/google/*" -d $TARGET_DIR || fail "Failed to unzip protoc includes" +chmod a+x $TARGET_DIR/bin/protoc || fail "Failed to set executable permissions for protoc" +rm -rf protoc.zip || fail "Failed to remove protoc zip file" +echo "protoc ${PROTOC_VERSION} installed successfully for ${PROTOC_OS} ${PROTOC_ARCH}" \ No newline at end of file diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 6c5ac045..f9824e7a 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,135 +1,17 @@ -# This will be the main build image -FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -WORKDIR /app - -# Planner stage -FROM --platform=${BUILDPLATFORM} chef AS planner -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -COPY . . 
-RUN cargo chef prepare --recipe-path recipe.json - -# Builder stage -FROM --platform=${BUILDPLATFORM} chef AS builder -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -ENV BUILD_VAR_SCRIPT=/tmp/env.sh -COPY --from=planner /app/recipe.json recipe.json - -# Set up the build environment for cross-compilation if needed -RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ - # We're on x64, cross-compiling for arm64 - rustup target add aarch64-unknown-linux-gnu && \ - apt update && \ - apt install -y gcc-aarch64-linux-gnu && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ - echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture arm64 && \ - apt update && \ - apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ - # We're on arm64, cross-compiling for x64 - rustup target add x86_64-unknown-linux-gnu && \ - apt update && \ - apt install -y gcc-x86-64-linux-gnu && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ - echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture amd64 && \ - apt update && \ - apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ - fi - -# Run cook to prep the build -RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - . 
${BUILD_VAR_SCRIPT} && \ - echo "Cross-compilation environment set up for ${TARGET}"; \ - else \ - echo "No cross-compilation needed"; \ - fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ - cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} - -# Now we can copy the source files - chef cook wants to run before this step -COPY . . - -# Get the latest Protoc since the one in the Debian repo is incredibly old -RUN apt update && apt install -y unzip curl ca-certificates && \ - PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ - if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ - PROTOC_ARCH=x86_64; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ - PROTOC_ARCH=aarch_64; \ - else \ - echo "${BUILDPLATFORM} is not supported."; \ - exit 1; \ - fi && \ - curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ - unzip -q protoc.zip bin/protoc -d /usr && \ - unzip -q protoc.zip "include/google/*" -d /usr && \ - chmod a+x /usr/bin/protoc && \ - rm -rf protoc.zip - -# Build the application -RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - chmod +x ${BUILD_VAR_SCRIPT} && \ - . ${BUILD_VAR_SCRIPT} && \ - echo "Cross-compilation environment set up for ${TARGET}"; \ - else \ - echo "No cross-compilation needed"; \ - fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ - cargo build ${TARGET_FLAG} --release --bin commit-boost-signer ${FEATURE_OPENSSL_VENDORED} && \ - if [ ! 
-z "$TARGET" ]; then \ - # If we're cross-compiling, we need to move the binary out of the target dir - mv target/${TARGET}/release/commit-boost-signer target/release/commit-boost-signer; \ - fi - -# Assemble the runner image -FROM debian:bookworm-slim AS runtime -WORKDIR /app - +FROM debian:bookworm-slim +ARG BINARIES_PATH TARGETOS TARGETARCH +COPY ${BINARIES_PATH}/commit-boost-signer-${TARGETOS}-${TARGETARCH} /usr/local/bin RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ libssl3 \ libssl-dev \ - curl \ - && apt-get clean autoclean \ - && rm -rf /var/lib/apt/lists/* - -COPY --from=builder /app/target/release/commit-boost-signer /usr/local/bin + curl && \ + # Cleanup + apt-get clean autoclean && \ + rm -rf /var/lib/apt/lists/* +# Create a non-root user to run the application RUN groupadd -g 10001 commitboost && \ useradd -u 10001 -g commitboost -s /sbin/nologin commitboost USER commitboost From 12c020a20af91f673e348b12f2bc561fe57a6ae4 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 13 May 2025 02:53:02 -0400 Subject: [PATCH 11/35] Fixed the Docker image binary filenames --- provisioning/pbs.Dockerfile | 2 +- provisioning/signer.Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/provisioning/pbs.Dockerfile b/provisioning/pbs.Dockerfile index 9eff5890..9eb72702 100644 --- a/provisioning/pbs.Dockerfile +++ b/provisioning/pbs.Dockerfile @@ -1,6 +1,6 @@ FROM debian:bookworm-slim ARG BINARIES_PATH TARGETOS TARGETARCH -COPY ${BINARIES_PATH}/commit-boost-pbs-${TARGETOS}-${TARGETARCH} /usr/local/bin +COPY ${BINARIES_PATH}/commit-boost-pbs-${TARGETOS}-${TARGETARCH} /usr/local/bin/commit-boost-pbs RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index f9824e7a..05679762 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,6 +1,6 @@ FROM debian:bookworm-slim ARG BINARIES_PATH TARGETOS TARGETARCH -COPY ${BINARIES_PATH}/commit-boost-signer-${TARGETOS}-${TARGETARCH} /usr/local/bin +COPY ${BINARIES_PATH}/commit-boost-signer-${TARGETOS}-${TARGETARCH} /usr/local/bin/commit-boost-signer RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ From 53cafc039a747e61a92b9fb41a9a53a395f1a1a0 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 13 May 2025 17:42:28 -0400 Subject: [PATCH 12/35] Cleaned up the Darwin artifact step --- .github/workflows/release.yml | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 40745fbb..5be42110 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -98,9 +98,11 @@ jobs: - commit-boost-signer include: # - target: x86_64-apple-darwin - # os: macos-latest-large + # os: macos-latest-large + # package-suffix: x86-64 - target: aarch64-apple-darwin os: macos-latest + package-suffix: arm64 runs-on: ${{ matrix.os }} steps: - name: Checkout code @@ -145,18 +147,18 @@ jobs: - name: Build binary (Darwin) run: cargo build --release --target ${{ matrix.target }} --bin ${{ matrix.name }} - - name: Package binary (Unix) + - name: Package binary (Darwin) run: | cd target/${{ matrix.target }}/release - tar -czvf ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz ${{ matrix.name }} - mv ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz ../../../ + tar -czvf ${{ matrix.name }}-${{ github.ref_name }}-darwin_${{ 
matrix.package-suffix }}.tar.gz ${{ matrix.name }} + mv ${{ matrix.name }}-${{ github.ref_name }}-darwin_${{ matrix.package-suffix }}.tar.gz ../../../ - name: Upload artifact uses: actions/upload-artifact@v4 with: - name: ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }} + name: ${{ matrix.name }}-${{ github.ref_name }}-darwin_${{ matrix.package-suffix }} path: | - ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz + ${{ matrix.name }}-${{ github.ref_name }}-darwin_${{ matrix.package-suffix }}.tar.gz # Builds the PBS Docker image build-and-push-pbs-docker: @@ -208,8 +210,6 @@ jobs: tags: | ghcr.io/commit-boost/pbs:${{ github.ref_name }} ${{ !contains(github.ref_name, 'rc') && 'ghcr.io/commit-boost/pbs:latest' || '' }} - cache-from: type=registry,ref=ghcr.io/commit-boost/pbs:buildcache - cache-to: type=registry,ref=ghcr.io/commit-boost/pbs:buildcache,mode=max file: provisioning/pbs.Dockerfile # Builds the Signer Docker image @@ -262,8 +262,6 @@ jobs: tags: | ghcr.io/commit-boost/signer:${{ github.ref_name }} ${{ !contains(github.ref_name, 'rc') && 'ghcr.io/commit-boost/signer:latest' || '' }} - cache-from: type=registry,ref=ghcr.io/commit-boost/signer:buildcache - cache-to: type=registry,ref=ghcr.io/commit-boost/signer:buildcache,mode=max file: provisioning/signer.Dockerfile # Creates a draft release on GitHub with the binaries From 58c61174c138f61a775031c2c28b00dac5038c64 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 14 May 2025 00:24:35 -0400 Subject: [PATCH 13/35] Made the CI workflow and justfile use the same toolchain as the source --- .github/workflows/ci.yml | 4 ++-- justfile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 894d13da..ae9bad89 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,11 +26,11 @@ jobs: - name: Install Rust toolchain uses: dtolnay/rust-toolchain@master with: - toolchain: nightly-2025-02-26 + toolchain: 1.83 components: clippy, rustfmt - name: Install protoc - run: sudo apt-get install protobuf-compiler + run: sudo provisioning/protoc.sh - name: Setup just uses: extractions/setup-just@v2 diff --git a/justfile b/justfile index e6d11f62..b9250870 100644 --- a/justfile +++ b/justfile @@ -1,5 +1,5 @@ -# Makes sure the nightly-2025-02-26 toolchain is installed -toolchain := "nightly-2025-02-26" +# Makes sure the same toolchain as the source is installed +toolchain := 1.83 fmt: rustup toolchain install {{toolchain}} > /dev/null 2>&1 && \ From 45e581baabbed9ba7987c3260b286a877a22480b Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 14 May 2025 01:06:05 -0400 Subject: [PATCH 14/35] Revert "Made the CI workflow and justfile use the same toolchain as the source" This reverts commit 58c61174c138f61a775031c2c28b00dac5038c64. 
--- .github/workflows/ci.yml | 4 ++-- justfile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ae9bad89..894d13da 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,11 +26,11 @@ jobs: - name: Install Rust toolchain uses: dtolnay/rust-toolchain@master with: - toolchain: 1.83 + toolchain: nightly-2025-02-26 components: clippy, rustfmt - name: Install protoc - run: sudo provisioning/protoc.sh + run: sudo apt-get install protobuf-compiler - name: Setup just uses: extractions/setup-just@v2 diff --git a/justfile b/justfile index b9250870..e6d11f62 100644 --- a/justfile +++ b/justfile @@ -1,5 +1,5 @@ -# Makes sure the same toolchain as the source is installed -toolchain := 1.83 +# Makes sure the nightly-2025-02-26 toolchain is installed +toolchain := "nightly-2025-02-26" fmt: rustup toolchain install {{toolchain}} > /dev/null 2>&1 && \ From 24a10c55f3bd558ad976852255b351bed31ef641 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 14 May 2025 02:26:15 -0400 Subject: [PATCH 15/35] Testing removal of OpenSSL vendored option --- Cargo.lock | 12 -------- Cargo.toml | 3 -- crates/common/Cargo.toml | 4 --- provisioning/build.Dockerfile | 56 +++++++++++------------------------ 4 files changed, 17 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 436d3b65..5ebc811a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1494,8 +1494,6 @@ dependencies = [ "ethereum_ssz_derive", "eyre", "jsonwebtoken", - "k256", - "openssl", "pbkdf2 0.12.2", "rand 0.9.0", "reqwest", @@ -3552,15 +3550,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" -[[package]] -name = "openssl-src" -version = "300.5.0+3.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ce546f549326b0e6052b649198487d91320875da901e7bd11a06d1ee3f9c2f" -dependencies = [ - "cc", -] - [[package]] name = "openssl-sys" version = "0.9.106" @@ -3569,7 +3558,6 @@ checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd" dependencies = [ "cc", "libc", - "openssl-src", "pkg-config", "vcpkg", ] diff --git a/Cargo.toml b/Cargo.toml index 14cddf82..aef26a94 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,9 +7,6 @@ edition = "2021" rust-version = "1.83" version = "0.7.0-rc.2" -[workspace.features] -openssl-vendored = ["crates/common/openssl-vendored"] - [workspace.dependencies] aes = "0.8" alloy = { version = "0.12", features = [ diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 15c0b8d1..df78b046 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -41,7 +41,3 @@ tree_hash_derive.workspace = true unicode-normalization.workspace = true url.workspace = true jsonwebtoken.workspace = true -openssl = { version = "0.10", optional = true, features = ["vendored"] } - -[features] -openssl-vendored = ["openssl/vendored"] diff --git a/provisioning/build.Dockerfile b/provisioning/build.Dockerfile index a4eb3723..34ad27a5 100644 --- a/provisioning/build.Dockerfile +++ b/provisioning/build.Dockerfile @@ -1,15 +1,15 @@ # This will be the main build image FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE WORKDIR /app FROM --platform=${BUILDPLATFORM} chef AS planner -ARG TARGETOS TARGETARCH BUILDPLATFORM 
OPENSSL_VENDORED TARGET_CRATE +ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE COPY . . RUN cargo chef prepare --recipe-path recipe.json FROM --platform=${BUILDPLATFORM} chef AS builder -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE RUN test -n "$TARGET_CRATE" || (echo "TARGET_CRATE must be set to the service / binary you want to build" && false) ENV BUILD_VAR_SCRIPT=/tmp/env.sh COPY --from=planner /app/recipe.json recipe.json @@ -18,43 +18,33 @@ COPY --from=planner /app/recipe.json recipe.json RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # We're on x64, cross-compiling for arm64 rustup target add aarch64-unknown-linux-gnu && \ + dpkg --add-architecture arm64 && \ apt update && \ - apt install -y gcc-aarch64-linux-gnu && \ + apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture arm64 && \ - apt update && \ - apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ # We're on arm64, cross-compiling for x64 rustup target add x86_64-unknown-linux-gnu && \ + dpkg --add-architecture amd64 && \ apt update && \ - apt install -y gcc-x86-64-linux-gnu && \ + apt install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 && \ echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture amd64 && \ - apt update && \ - apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> 
${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ fi # Run cook to prep the build @@ -64,14 +54,8 @@ RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ else \ echo "No cross-compilation needed"; \ fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ export GIT_HASH=$(git rev-parse HEAD) && \ - cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json # Get the latest Protoc since the one in the Debian repo is incredibly old COPY provisioning/protoc.sh provisioning/protoc.sh @@ -88,14 +72,8 @@ RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ else \ echo "No cross-compilation needed"; \ fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ export GIT_HASH=$(git rev-parse HEAD) && \ - cargo build ${TARGET_FLAG} --release --bin ${TARGET_CRATE} ${FEATURE_OPENSSL_VENDORED} && \ + cargo build ${TARGET_FLAG} --release --bin ${TARGET_CRATE} && \ if [ ! -z "$TARGET" ]; then \ # If we're cross-compiling, we need to move the binary out of the target dir mv target/${TARGET}/release/${TARGET_CRATE} target/release/${TARGET_CRATE}; \ From e36da545b00929146efbfa60eac1df0efb512d5e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 14 May 2025 02:59:32 -0400 Subject: [PATCH 16/35] Updating just in the CI workflow --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 894d13da..0b15367f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,12 +30,12 @@ jobs: components: clippy, rustfmt - name: Install protoc - run: sudo apt-get install protobuf-compiler + run: sudo provisioning/protoc.sh - name: Setup just - uses: extractions/setup-just@v2 + uses: extractions/setup-just@v3 with: - just-version: 1.5.0 + just-version: 1.40.0 - name: Check compilation run: cargo check From e7c6d193b15232dfa51e09f61c075e3c9941a18d Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 21 May 2025 02:56:00 -0400 Subject: [PATCH 17/35] Refactored the signer to support host and port config settings --- crates/cli/src/docker_init.rs | 34 +++++++++++++++++------ crates/common/src/config/constants.rs | 2 +- crates/common/src/config/signer.rs | 37 +++++++++++++++++++------ crates/common/src/signer/constants.rs | 1 + crates/common/src/signer/mod.rs | 2 ++ crates/signer/src/service.rs | 7 ++--- docs/docs/get_started/configuration.md | 8 ++++++ docs/docs/get_started/running/binary.md | 4 +-- 8 files changed, 72 insertions(+), 23 deletions(-) create mode 100644 crates/common/src/signer/constants.rs diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 4453f597..652e3448 100644 --- 
a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -14,11 +14,11 @@ use cb_common::{ PBS_ENDPOINT_ENV, PBS_MODULE_NAME, PROXY_DIR_DEFAULT, PROXY_DIR_ENV, PROXY_DIR_KEYS_DEFAULT, PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_DEFAULT, PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, - SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, SIGNER_JWT_SECRET_ENV, SIGNER_KEYS_ENV, - SIGNER_MODULE_NAME, SIGNER_PORT_ENV, SIGNER_URL_ENV, + SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, SIGNER_ENDPOINT_ENV, + SIGNER_JWT_SECRET_ENV, SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, SIGNER_URL_ENV, }, pbs::{BUILDER_API_PATH, GET_STATUS_PATH}, - signer::{ProxyStore, SignerLoader}, + signer::{ProxyStore, SignerLoader, DEFAULT_SIGNER_PORT}, types::ModuleId, utils::random_jwt_secret, }; @@ -73,7 +73,11 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut targets = Vec::new(); // address for signer API communication - let signer_port = 20000; + let signer_port = if let Some(signer_config) = &cb_config.signer { + signer_config.port + } else { + DEFAULT_SIGNER_PORT + }; let signer_server = if let Some(SignerConfig { inner: SignerType::Remote { url }, .. }) = &cb_config.signer { url.to_string() @@ -334,10 +338,17 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut signer_envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), get_env_same(JWTS_ENV), - get_env_uval(SIGNER_PORT_ENV, signer_port as u64), ]); - let mut ports = vec![]; + // Bind the signer API to 0.0.0.0 + let container_endpoint = + SocketAddr::from((Ipv4Addr::UNSPECIFIED, signer_config.port)); + let (key, val) = get_env_val(SIGNER_ENDPOINT_ENV, &container_endpoint.to_string()); + signer_envs.insert(key, val); + + let host_endpoint = SocketAddr::from((signer_config.host, signer_config.port)); + let mut ports = vec![format!("{}:{}", host_endpoint, signer_config.port)]; + warnings.push(format!("cb_signer has an exported port on {}", signer_config.port)); if let Some((key, val)) = chain_spec_env.clone() { signer_envs.insert(key, val); @@ -459,13 +470,20 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut signer_envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), get_env_same(JWTS_ENV), - get_env_uval(SIGNER_PORT_ENV, signer_port as u64), get_env_val(DIRK_CERT_ENV, DIRK_CERT_DEFAULT), get_env_val(DIRK_KEY_ENV, DIRK_KEY_DEFAULT), get_env_val(DIRK_DIR_SECRETS_ENV, DIRK_DIR_SECRETS_DEFAULT), ]); - let mut ports = vec![]; + // Bind the signer API to 0.0.0.0 + let container_endpoint = + SocketAddr::from((Ipv4Addr::UNSPECIFIED, signer_config.port)); + let (key, val) = get_env_val(SIGNER_ENDPOINT_ENV, &container_endpoint.to_string()); + signer_envs.insert(key, val); + + let host_endpoint = SocketAddr::from((signer_config.host, signer_config.port)); + let mut ports = vec![format!("{}:{}", host_endpoint, signer_config.port)]; + warnings.push(format!("cb_signer has an exported port on {}", signer_config.port)); if let Some((key, val)) = chain_spec_env.clone() { signer_envs.insert(key, val); diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index 422af7e7..d7799146 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -33,7 +33,7 @@ pub const SIGNER_IMAGE_DEFAULT: &str = "ghcr.io/commit-boost/signer:latest"; pub const SIGNER_MODULE_NAME: &str = "signer"; /// Where the signer module should open the 
server -pub const SIGNER_PORT_ENV: &str = "CB_SIGNER_PORT"; +pub const SIGNER_ENDPOINT_ENV: &str = "CB_SIGNER_ENDPOINT"; /// Comma separated list module_id=jwt_secret pub const JWTS_ENV: &str = "CB_JWTS"; diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 9df6b948..dce97666 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -1,4 +1,8 @@ -use std::{collections::HashMap, path::PathBuf}; +use std::{ + collections::HashMap, + net::{Ipv4Addr, SocketAddr}, + path::PathBuf, +}; use eyre::{bail, OptionExt, Result}; use serde::{Deserialize, Serialize}; @@ -6,18 +10,25 @@ use tonic::transport::{Certificate, Identity}; use url::Url; use super::{ - constants::SIGNER_IMAGE_DEFAULT, load_jwt_secrets, utils::load_env_var, CommitBoostConfig, - SIGNER_PORT_ENV, + load_jwt_secrets, load_optional_env_var, utils::load_env_var, CommitBoostConfig, + SIGNER_ENDPOINT_ENV, SIGNER_IMAGE_DEFAULT, }; use crate::{ config::{DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, DIRK_KEY_ENV}, - signer::{ProxyStore, SignerLoader}, + signer::{ProxyStore, SignerLoader, DEFAULT_SIGNER_PORT}, types::{Chain, ModuleId}, + utils::{default_host, default_u16}, }; #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "snake_case")] pub struct SignerConfig { + /// Host address to listen for signer API calls on + #[serde(default = "default_host")] + pub host: Ipv4Addr, + /// Port to listen for signer API calls on + #[serde(default = "default_u16::")] + pub port: u16, /// Docker image of the module #[serde(default = "default_signer")] pub docker_image: String, @@ -87,7 +98,7 @@ pub struct StartSignerConfig { pub chain: Chain, pub loader: Option, pub store: Option, - pub server_port: u16, + pub endpoint: SocketAddr, pub jwts: HashMap, pub dirk: Option, } @@ -97,7 +108,17 @@ impl StartSignerConfig { let config = CommitBoostConfig::from_env_path()?; let jwts = load_jwt_secrets()?; - let server_port = load_env_var(SIGNER_PORT_ENV)?.parse()?; + + // Load the server endpoint first from the env var, then the config, and finally + // the defaults + let endpoint = if let Some(endpoint) = load_optional_env_var(SIGNER_ENDPOINT_ENV) { + endpoint.parse()? + } else { + match config.signer { + Some(ref signer) => SocketAddr::from((signer.host, signer.port)), + None => SocketAddr::from((default_host(), DEFAULT_SIGNER_PORT)), + } + }; let signer = config.signer.ok_or_eyre("Signer config is missing")?.inner; @@ -105,7 +126,7 @@ impl StartSignerConfig { SignerType::Local { loader, store, .. 
} => Ok(StartSignerConfig { chain: config.chain, loader: Some(loader), - server_port, + endpoint, jwts, store, dirk: None, @@ -133,7 +154,7 @@ impl StartSignerConfig { Ok(StartSignerConfig { chain: config.chain, - server_port, + endpoint, jwts, loader: None, store, diff --git a/crates/common/src/signer/constants.rs b/crates/common/src/signer/constants.rs new file mode 100644 index 00000000..aa834f91 --- /dev/null +++ b/crates/common/src/signer/constants.rs @@ -0,0 +1 @@ +pub const DEFAULT_SIGNER_PORT: u16 = 20000; diff --git a/crates/common/src/signer/mod.rs b/crates/common/src/signer/mod.rs index e0a164a7..b6dce29d 100644 --- a/crates/common/src/signer/mod.rs +++ b/crates/common/src/signer/mod.rs @@ -1,8 +1,10 @@ +mod constants; mod loader; mod schemes; mod store; mod types; +pub use constants::*; pub use loader::*; pub use schemes::*; pub use store::*; diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 28a1d934..a965f057 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, net::SocketAddr, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; use axum::{ extract::{Request, State}, @@ -67,7 +67,7 @@ impl SigningService { let loaded_consensus = state.manager.read().await.available_consensus_signers(); let loaded_proxies = state.manager.read().await.available_proxy_signers(); - info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, modules =? module_ids, port =? config.server_port, loaded_consensus, loaded_proxies, "Starting signing service"); + info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, modules =? module_ids, endpoint =? config.endpoint, loaded_consensus, loaded_proxies, "Starting signing service"); SigningService::init_metrics(config.chain)?; @@ -81,8 +81,7 @@ impl SigningService { .route_layer(middleware::from_fn(log_request)) .route(STATUS_PATH, get(handle_status)); - let address = SocketAddr::from(([0, 0, 0, 0], config.server_port)); - let listener = TcpListener::bind(address).await?; + let listener = TcpListener::bind(config.endpoint).await?; axum::serve(listener, app).await.wrap_err("signer server exited") } diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 4e642205..5d196619 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -65,6 +65,8 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml [signer] + port = 20000 + [signer.local.loader] format = "lighthouse" keys_path = "keys" @@ -111,6 +113,8 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml [signer] + port = 20000 + [signer.local.loader] format = "teku" keys_path = "keys" @@ -133,6 +137,8 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml [signer] + port = 20000 + [signer.local.loader] format = "lodestar" keys_path = "keys" @@ -299,6 +305,8 @@ port = 18550 url = "" [signer] +port = 20000 + [signer.loader] format = "lighthouse" keys_path = "/path/to/keys" diff --git a/docs/docs/get_started/running/binary.md b/docs/docs/get_started/running/binary.md index 3708ab19..ea5138c6 100644 --- a/docs/docs/get_started/running/binary.md +++ b/docs/docs/get_started/running/binary.md @@ -22,12 +22,12 @@ Modules need some environment variables to work correctly. 
### PBS Module - `CB_BUILDER_URLS`: optional, comma-separated list of urls to `events` modules where to post builder events. -- `CB_PBS_ENDPOINT`: optional, override the endpoint where the PBS module will open the port for the beacon node. +- `CB_PBS_ENDPOINT`: optional, override to specify the `IP:port` endpoint where the PBS module will open the port for the beacon node. - `CB_MUX_PATH_{ID}`: optional, override where to load mux validator keys for mux with `id=\{ID\}`. ### Signer Module - `CB_SIGNER_JWT_SECRET`: secret to use for JWT authentication with the Signer module. -- `CB_SIGNER_PORT`: required, port to open the signer server on. +- `CB_SIGNER_ENDPOINT`: optional, override to specify the `IP:port` endpoint to bind the signer server to. - For loading keys we currently support: - `CB_SIGNER_LOADER_FILE`: path to a `.json` with plaintext keys (for testing purposes only). - `CB_SIGNER_LOADER_FORMAT`, `CB_SIGNER_LOADER_KEYS_DIR` and `CB_SIGNER_LOADER_SECRETS_DIR`: paths to the `keys` and `secrets` directories or files (ERC-2335 style keystores, see [Signer config](../configuration/#signer-module) for more info). From 6117219d62f6243d263fdbabddc5bb387bfd2857 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 21 May 2025 12:24:02 -0400 Subject: [PATCH 18/35] Updated docs --- docs/docs/get_started/building.md | 1 - docs/docs/get_started/configuration.md | 25 +++++++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md index d38b447f..f831de57 100644 --- a/docs/docs/get_started/building.md +++ b/docs/docs/get_started/building.md @@ -166,7 +166,6 @@ This will create a binary in `./target/release/commit-boost-signer`. To verify i The signer needs the following environment variables set: - `CB_CONFIG` = path of your config file. - `CB_JWTS` = a dummy key-value pair of [JWT](https://en.wikipedia.org/wiki/JSON_Web_Token) values for various services. Since we don't need them for the sake of just testing the binary, we can use something like `"test_jwts=dummy"`. -- `CB_SIGNER_PORT` = the network port to listen for signer requests on. Default is `20000`. Set these values, create the `keys` and `secrets` directories listed in the configuration file, and run the binary: diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 5d196619..efe9da3f 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -39,6 +39,13 @@ Commit-Boost supports both local and remote signers. The signer module is respon To start a local signer module, you need to include its parameters in the config file ```toml +[pbs] +... +with_signer = true + +[signer] +port = 20000 + [signer.local.loader] format = "lighthouse" keys_path = "/path/to/keys" @@ -64,6 +71,10 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml + [pbs] + ... + with_signer = true + [signer] port = 20000 @@ -89,7 +100,13 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml + [pbs] + ... + with_signer = true + [signer] + port = 20000 + [signer.local.loader] format = "prysm" keys_path = "wallet/direct/accounts/all-accounts.keystore.json" @@ -112,6 +129,10 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml + [pbs] + ... 
+ with_signer = true + [signer] port = 20000 @@ -136,6 +157,10 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml + [pbs] + ... + with_signer = true + [signer] port = 20000 From c0f591d5656aed3f2b705583bcd95d88abe45394 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 21 May 2025 12:41:09 -0400 Subject: [PATCH 19/35] Fixing Clippy in CI workflow --- .github/workflows/ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0b15367f..3be3a7da 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,6 +28,9 @@ jobs: with: toolchain: nightly-2025-02-26 components: clippy, rustfmt + + - name: Install Clippy on prod toolchain + run: rustup component add --toolchain 1.83.0-x86_64-unknown-linux-gnu clippy - name: Install protoc run: sudo provisioning/protoc.sh From adbd34a02d52a86251258cc82be5f1ebf47474fe Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 01:07:51 -0400 Subject: [PATCH 20/35] Removed obviated CI setup --- .github/workflows/ci.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3be3a7da..0b15367f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,9 +28,6 @@ jobs: with: toolchain: nightly-2025-02-26 components: clippy, rustfmt - - - name: Install Clippy on prod toolchain - run: rustup component add --toolchain 1.83.0-x86_64-unknown-linux-gnu clippy - name: Install protoc run: sudo provisioning/protoc.sh From e3488b34f8629fe65071688165c17579d4b9fd23 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 20 May 2025 15:40:27 -0400 Subject: [PATCH 21/35] Minor dedup of RwLock guard acquisition --- crates/signer/src/service.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index a965f057..cce8038e 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -64,8 +64,14 @@ impl SigningService { jwts: config.jwts.into(), }; - let loaded_consensus = state.manager.read().await.available_consensus_signers(); - let loaded_proxies = state.manager.read().await.available_proxy_signers(); + // Get the signer counts + let loaded_consensus: usize; + let loaded_proxies: usize; + { + let manager = state.manager.read().await; + loaded_consensus = manager.available_consensus_signers(); + loaded_proxies = manager.available_proxy_signers(); + } info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, modules =? module_ids, endpoint =? 
config.endpoint, loaded_consensus, loaded_proxies, "Starting signing service"); From c3d7ec40f92a4dc2c4481afd517d81ebf9e9b7cc Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 22 May 2025 00:58:49 -0400 Subject: [PATCH 22/35] Added rate limiting for signer clients with repeated JWT auth failures --- crates/common/src/config/constants.rs | 5 ++ crates/common/src/config/signer.rs | 51 ++++++++++- crates/common/src/signer/constants.rs | 5 ++ crates/common/src/utils.rs | 4 + crates/signer/src/error.rs | 6 ++ crates/signer/src/service.rs | 116 ++++++++++++++++++++++++-- 6 files changed, 176 insertions(+), 11 deletions(-) diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index d7799146..5941a42b 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -35,6 +35,11 @@ pub const SIGNER_MODULE_NAME: &str = "signer"; /// Where the signer module should open the server pub const SIGNER_ENDPOINT_ENV: &str = "CB_SIGNER_ENDPOINT"; +// JWT authentication settings +pub const SIGNER_JWT_AUTH_FAIL_LIMIT_ENV: &str = "CB_SIGNER_JWT_AUTH_FAIL_LIMIT"; +pub const SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV: &str = + "CB_SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS"; + /// Comma separated list module_id=jwt_secret pub const JWTS_ENV: &str = "CB_JWTS"; /// The JWT secret for the signer to validate the modules requests diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index dce97666..6eb870cf 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -11,13 +11,17 @@ use url::Url; use super::{ load_jwt_secrets, load_optional_env_var, utils::load_env_var, CommitBoostConfig, - SIGNER_ENDPOINT_ENV, SIGNER_IMAGE_DEFAULT, + SIGNER_ENDPOINT_ENV, SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_ENV, + SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV, }; use crate::{ config::{DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, DIRK_KEY_ENV}, - signer::{ProxyStore, SignerLoader, DEFAULT_SIGNER_PORT}, + signer::{ + ProxyStore, SignerLoader, DEFAULT_JWT_AUTH_FAIL_LIMIT, + DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, DEFAULT_SIGNER_PORT, + }, types::{Chain, ModuleId}, - utils::{default_host, default_u16}, + utils::{default_host, default_u16, default_u32}, }; #[derive(Debug, Serialize, Deserialize, Clone)] @@ -26,12 +30,24 @@ pub struct SignerConfig { /// Host address to listen for signer API calls on #[serde(default = "default_host")] pub host: Ipv4Addr, + /// Port to listen for signer API calls on #[serde(default = "default_u16::")] pub port: u16, + /// Docker image of the module #[serde(default = "default_signer")] pub docker_image: String, + + /// Number of JWT auth failures before rate limiting an endpoint + #[serde(default = "default_u32::")] + pub jwt_auth_fail_limit: u32, + + /// Duration in seconds to rate limit an endpoint after the JWT auth failure + /// limit has been reached + #[serde(default = "default_u32::")] + pub jwt_auth_fail_timeout_seconds: u32, + /// Inner type-specific configuration #[serde(flatten)] pub inner: SignerType, @@ -100,6 +116,8 @@ pub struct StartSignerConfig { pub store: Option, pub endpoint: SocketAddr, pub jwts: HashMap, + pub jwt_auth_fail_limit: u32, + pub jwt_auth_fail_timeout_seconds: u32, pub dirk: Option, } @@ -120,6 +138,29 @@ impl StartSignerConfig { } }; + // Load the JWT auth fail limit the same way + let jwt_auth_fail_limit = + if let Some(limit) = load_optional_env_var(SIGNER_JWT_AUTH_FAIL_LIMIT_ENV) { + limit.parse()? 
+ } else { + match config.signer { + Some(ref signer) => signer.jwt_auth_fail_limit, + None => DEFAULT_JWT_AUTH_FAIL_LIMIT, + } + }; + + // Load the JWT auth fail timeout the same way + let jwt_auth_fail_timeout_seconds = if let Some(timeout) = + load_optional_env_var(SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV) + { + timeout.parse()? + } else { + match config.signer { + Some(ref signer) => signer.jwt_auth_fail_timeout_seconds, + None => DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, + } + }; + let signer = config.signer.ok_or_eyre("Signer config is missing")?.inner; match signer { @@ -128,6 +169,8 @@ impl StartSignerConfig { loader: Some(loader), endpoint, jwts, + jwt_auth_fail_limit, + jwt_auth_fail_timeout_seconds, store, dirk: None, }), @@ -156,6 +199,8 @@ impl StartSignerConfig { chain: config.chain, endpoint, jwts, + jwt_auth_fail_limit, + jwt_auth_fail_timeout_seconds, loader: None, store, dirk: Some(DirkConfig { diff --git a/crates/common/src/signer/constants.rs b/crates/common/src/signer/constants.rs index aa834f91..45e3ce23 100644 --- a/crates/common/src/signer/constants.rs +++ b/crates/common/src/signer/constants.rs @@ -1 +1,6 @@ pub const DEFAULT_SIGNER_PORT: u16 = 20000; + +// Rate limit signer API requests for 5 minutes after the endpoint has 3 JWT +// auth failures +pub const DEFAULT_JWT_AUTH_FAIL_LIMIT: u32 = 3; +pub const DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS: u32 = 5 * 60; diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index 37119580..a1dcb7cb 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -137,6 +137,10 @@ pub const fn default_u64() -> u64 { U } +pub const fn default_u32() -> u32 { + U +} + pub const fn default_u16() -> u16 { U } diff --git a/crates/signer/src/error.rs b/crates/signer/src/error.rs index 477e9e42..a2a113f3 100644 --- a/crates/signer/src/error.rs +++ b/crates/signer/src/error.rs @@ -27,6 +27,9 @@ pub enum SignerModuleError { #[error("internal error: {0}")] Internal(String), + + #[error("rate limited for {0} more seconds")] + RateLimited(f64), } impl IntoResponse for SignerModuleError { @@ -45,6 +48,9 @@ impl IntoResponse for SignerModuleError { (StatusCode::INTERNAL_SERVER_ERROR, "internal error".to_string()) } SignerModuleError::SignerError(err) => (StatusCode::BAD_REQUEST, err.to_string()), + SignerModuleError::RateLimited(duration) => { + (StatusCode::TOO_MANY_REQUESTS, format!("rate limited for {duration:?}")) + } } .into_response() } diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index cce8038e..3ca1d5ac 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -1,7 +1,12 @@ -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::HashMap, + net::SocketAddr, + sync::Arc, + time::{Duration, Instant}, +}; use axum::{ - extract::{Request, State}, + extract::{ConnectInfo, Request, State}, http::StatusCode, middleware::{self, Next}, response::{IntoResponse, Response}, @@ -41,13 +46,30 @@ use crate::{ /// Implements the Signer API and provides a service for signing requests pub struct SigningService; +// Tracker for a peer's JWT failures +struct JwtAuthFailureInfo { + // Number of auth failures since the first failure was tracked + failure_count: u32, + + // Time of the last auth failure + last_failure: Instant, +} + #[derive(Clone)] struct SigningState { /// Manager handling different signing methods manager: Arc>, + /// Map of modules ids to JWT secrets. 
This also acts as registry of all /// modules running jwts: Arc>, + + /// Map of JWT failures per peer + jwt_auth_failures: Arc>>, + + // JWT auth failure settings + jwt_auth_fail_limit: u32, + jwt_auth_fail_timeout: Duration, } impl SigningService { @@ -62,6 +84,9 @@ impl SigningService { let state = SigningState { manager: Arc::new(RwLock::new(start_manager(config.clone()).await?)), jwts: config.jwts.into(), + jwt_auth_failures: Arc::new(RwLock::new(HashMap::new())), + jwt_auth_fail_limit: config.jwt_auth_fail_limit, + jwt_auth_fail_timeout: Duration::from_secs(config.jwt_auth_fail_timeout_seconds as u64), }; // Get the signer counts @@ -73,7 +98,17 @@ impl SigningService { loaded_proxies = manager.available_proxy_signers(); } - info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, modules =? module_ids, endpoint =? config.endpoint, loaded_consensus, loaded_proxies, "Starting signing service"); + info!( + version = COMMIT_BOOST_VERSION, + commit_hash = COMMIT_BOOST_COMMIT, + modules =? module_ids, + endpoint =? config.endpoint, + loaded_consensus, + loaded_proxies, + jwt_auth_fail_limit =? state.jwt_auth_fail_limit, + jwt_auth_fail_timeout =? state.jwt_auth_fail_timeout, + "Starting signing service" + ); SigningService::init_metrics(config.chain)?; @@ -85,7 +120,8 @@ impl SigningService { .route(RELOAD_PATH, post(handle_reload)) .with_state(state.clone()) .route_layer(middleware::from_fn(log_request)) - .route(STATUS_PATH, get(handle_status)); + .route(STATUS_PATH, get(handle_status)) + .into_make_service_with_connect_info::(); let listener = TcpListener::bind(config.endpoint).await?; @@ -101,9 +137,76 @@ impl SigningService { async fn jwt_auth( State(state): State, TypedHeader(auth): TypedHeader>, + addr: ConnectInfo, mut req: Request, next: Next, ) -> Result { + // Check if the request needs to be rate limited + let client_ip = addr.ip().to_string(); + check_jwt_rate_limit(&state, &client_ip).await?; + + // Process JWT authorization + match check_jwt_auth(&auth, &state).await { + Ok(module_id) => { + req.extensions_mut().insert(module_id); + Ok(next.run(req).await) + } + Err(SignerModuleError::Unauthorized) => { + let mut failures = state.jwt_auth_failures.write().await; + let failure_info = failures + .entry(client_ip) + .or_insert(JwtAuthFailureInfo { failure_count: 0, last_failure: Instant::now() }); + failure_info.failure_count += 1; + failure_info.last_failure = Instant::now(); + Err(SignerModuleError::Unauthorized) + } + Err(err) => Err(err), + } +} + +/// Checks if the incoming request needs to be rate limited due to previous JWT +/// authentication failures +async fn check_jwt_rate_limit( + state: &SigningState, + client_ip: &String, +) -> Result<(), SignerModuleError> { + let mut failures = state.jwt_auth_failures.write().await; + + // Ignore clients that don't have any failures + if let Some(failure_info) = failures.get(client_ip) { + // If the last failure was more than the timeout ago, remove this entry so it's + // eligible again + let elapsed = failure_info.last_failure.elapsed(); + if elapsed > state.jwt_auth_fail_timeout { + debug!("Removing {client_ip} from JWT auth failure list"); + failures.remove(client_ip); + return Ok(()); + } + + // If the failure threshold hasn't been met yet, don't rate limit + if failure_info.failure_count < state.jwt_auth_fail_limit { + debug!( + "Client {client_ip} has {}/{} JWT auth failures, no rate limit applied", + failure_info.failure_count, state.jwt_auth_fail_limit + ); + return Ok(()); + } + + // Rate limit the 
request + let remaining = state.jwt_auth_fail_timeout - elapsed; + warn!("Client {client_ip} is rate limited for {remaining:?} more seconds due to JWT auth failures"); + return Err(SignerModuleError::RateLimited(remaining.as_secs_f64())); + } + + debug!("Client {client_ip} has no JWT auth failures, no rate limit applied"); + Ok(()) +} + +/// Checks if a request can successfully authenticate with the JWT secret +async fn check_jwt_auth( + auth: &Authorization, + state: &SigningState, +) -> Result { let jwt: Jwt = auth.token().to_string().into(); // We first need to decode it to get the module id and then validate it @@ -122,10 +225,7 @@ async fn jwt_auth( error!("Unauthorized request. Invalid JWT: {e}"); SignerModuleError::Unauthorized })?; - - req.extensions_mut().insert(module_id); - - Ok(next.run(req).await) + Ok(module_id) } /// Requests logging middleware layer From 9ddad6426a1fcdeb441adb259b6b2408729f1937 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 22 May 2025 02:06:21 -0400 Subject: [PATCH 23/35] Added Signer config validation --- Cargo.lock | 11 +++++++++++ Cargo.toml | 1 + crates/common/Cargo.toml | 1 + crates/common/src/config/mod.rs | 3 +++ crates/common/src/config/signer.rs | 21 ++++++++++++++++++++- tests/tests/config.rs | 12 ++++++------ 6 files changed, 42 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ebc811a..b80a4542 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1488,6 +1488,7 @@ dependencies = [ "cipher 0.4.4", "ctr 0.9.2", "derive_more 2.0.1", + "docker-image", "eth2_keystore", "ethereum_serde_utils", "ethereum_ssz 0.8.3", @@ -2158,6 +2159,16 @@ dependencies = [ "serde_yaml", ] +[[package]] +name = "docker-image" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ed901b8f2157bafce6e96f39217f7b1a4af32d84266d251ed7c22ce001f0b" +dependencies = [ + "lazy_static", + "regex", +] + [[package]] name = "doctest-file" version = "1.0.0" diff --git a/Cargo.toml b/Cargo.toml index aef26a94..b02ad0da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,7 @@ color-eyre = "0.6.3" ctr = "0.9.2" derive_more = { version = "2.0.1", features = ["deref", "display", "from", "into"] } docker-compose-types = "0.16.0" +docker-image = "0.2.1" eth2_keystore = { git = "https://github.com/sigp/lighthouse", rev = "8d058e4040b765a96aa4968f4167af7571292be2" } ethereum_serde_utils = "0.7.0" ethereum_ssz = "0.8" diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index df78b046..c3955d4a 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -16,6 +16,7 @@ blst.workspace = true cipher.workspace = true ctr.workspace = true derive_more.workspace = true +docker-image.workspace = true eth2_keystore.workspace = true ethereum_serde_utils.workspace = true ethereum_ssz.workspace = true diff --git a/crates/common/src/config/mod.rs b/crates/common/src/config/mod.rs index 75fd3c9d..b782999b 100644 --- a/crates/common/src/config/mod.rs +++ b/crates/common/src/config/mod.rs @@ -41,6 +41,9 @@ impl CommitBoostConfig { /// Validate config pub async fn validate(&self) -> Result<()> { self.pbs.pbs_config.validate(self.chain).await?; + if let Some(signer) = &self.signer { + signer.validate().await?; + } Ok(()) } diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 6eb870cf..01b50cde 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -4,7 +4,8 @@ use std::{ path::PathBuf, }; -use eyre::{bail, OptionExt, Result}; +use 
docker_image::DockerImage; +use eyre::{bail, ensure, OptionExt, Result}; use serde::{Deserialize, Serialize}; use tonic::transport::{Certificate, Identity}; use url::Url; @@ -40,6 +41,7 @@ pub struct SignerConfig { pub docker_image: String, /// Number of JWT auth failures before rate limiting an endpoint + /// If set to 0, no rate limiting will be applied #[serde(default = "default_u32::")] pub jwt_auth_fail_limit: u32, @@ -53,6 +55,23 @@ pub struct SignerConfig { pub inner: SignerType, } +impl SignerConfig { + /// Validate the signer config + pub async fn validate(&self) -> Result<()> { + // Port must be positive + ensure!(self.port > 0, "Port must be positive"); + + // The Docker tag must parse + ensure!(!self.docker_image.is_empty(), "Docker image is empty"); + ensure!( + DockerImage::parse(&self.docker_image).is_ok(), + format!("Invalid Docker image: {}", self.docker_image) + ); + + Ok(()) + } +} + fn default_signer() -> String { SIGNER_IMAGE_DEFAULT.to_string() } diff --git a/tests/tests/config.rs b/tests/tests/config.rs index dafd96d9..f6f31d96 100644 --- a/tests/tests/config.rs +++ b/tests/tests/config.rs @@ -37,11 +37,11 @@ async fn test_load_pbs_happy() -> Result<()> { // Docker and general settings assert_eq!(config.pbs.docker_image, "ghcr.io/commit-boost/pbs:latest"); - assert_eq!(config.pbs.with_signer, false); + assert!(!config.pbs.with_signer); assert_eq!(config.pbs.pbs_config.host, "127.0.0.1".parse::().unwrap()); assert_eq!(config.pbs.pbs_config.port, 18550); - assert_eq!(config.pbs.pbs_config.relay_check, true); - assert_eq!(config.pbs.pbs_config.wait_all_registrations, true); + assert!(config.pbs.pbs_config.relay_check); + assert!(config.pbs.pbs_config.wait_all_registrations); // Timeouts assert_eq!(config.pbs.pbs_config.timeout_get_header_ms, 950); @@ -49,12 +49,12 @@ async fn test_load_pbs_happy() -> Result<()> { assert_eq!(config.pbs.pbs_config.timeout_register_validator_ms, 3000); // Bid settings and validation - assert_eq!(config.pbs.pbs_config.skip_sigverify, false); + assert!(!config.pbs.pbs_config.skip_sigverify); dbg!(&config.pbs.pbs_config.min_bid_wei); dbg!(&U256::from(0.5)); assert_eq!(config.pbs.pbs_config.min_bid_wei, U256::from((0.5 * WEI_PER_ETH as f64) as u64)); assert_eq!(config.pbs.pbs_config.late_in_slot_time_ms, 2000); - assert_eq!(config.pbs.pbs_config.extra_validation_enabled, false); + assert!(!config.pbs.pbs_config.extra_validation_enabled); assert_eq!( config.pbs.pbs_config.rpc_url, Some("https://ethereum-holesky-rpc.publicnode.com".parse::().unwrap()) @@ -64,7 +64,7 @@ async fn test_load_pbs_happy() -> Result<()> { let relay = &config.relays[0]; assert_eq!(relay.id, Some("example-relay".to_string())); assert_eq!(relay.entry.url, "http://0xa1cec75a3f0661e99299274182938151e8433c61a19222347ea1313d839229cb4ce4e3e5aa2bdeb71c8fcf1b084963c2@abc.xyz".parse::().unwrap()); - assert_eq!(relay.enable_timing_games, false); + assert!(!relay.enable_timing_games); assert_eq!(relay.target_first_request_ms, Some(200)); assert_eq!(relay.frequency_get_header_ms, Some(300)); From c62185e13f301a3abcab32f9a28ed42f1185d7e3 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 22 May 2025 06:54:50 -0400 Subject: [PATCH 24/35] Started unit test setup for the Signer --- Cargo.lock | 6 +++-- Cargo.toml | 1 + tests/Cargo.toml | 2 ++ tests/src/utils.rs | 44 +++++++++++++++++++++++++++++-- tests/tests/pbs_get_header.rs | 2 +- tests/tests/signer_jwt_auth.rs | 47 ++++++++++++++++++++++++++++++++++ 6 files changed, 97 insertions(+), 5 deletions(-) create mode 100644 
tests/tests/signer_jwt_auth.rs diff --git a/Cargo.lock b/Cargo.lock index b80a4542..17d43e3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1590,9 +1590,11 @@ dependencies = [ "axum 0.8.1", "cb-common", "cb-pbs", + "cb-signer", "eyre", "reqwest", "serde_json", + "tempfile", "tokio", "tracing", "tracing-subscriber", @@ -4874,9 +4876,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.19.0" +version = "3.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "488960f40a3fd53d72c2a29a58722561dee8afdd175bd88e3db4677d7b2ba600" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" dependencies = [ "fastrand", "getrandom 0.3.1", diff --git a/Cargo.toml b/Cargo.toml index b02ad0da..5294508f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -57,6 +57,7 @@ serde_json = "1.0.117" serde_yaml = "0.9.33" sha2 = "0.10.8" ssz_types = "0.10" +tempfile = "3.20.0" thiserror = "2.0.12" tokio = { version = "1.37.0", features = ["full"] } toml = "0.8.13" diff --git a/tests/Cargo.toml b/tests/Cargo.toml index ce273ae7..f1b5c9d9 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -9,9 +9,11 @@ alloy.workspace = true axum.workspace = true cb-common.workspace = true cb-pbs.workspace = true +cb-signer.workspace = true eyre.workspace = true reqwest.workspace = true serde_json.workspace = true +tempfile.workspace = true tokio.workspace = true tracing.workspace = true tracing-subscriber.workspace = true diff --git a/tests/src/utils.rs b/tests/src/utils.rs index f2ae9157..e8561931 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -1,13 +1,22 @@ use std::{ + collections::HashMap, net::{Ipv4Addr, SocketAddr}, sync::{Arc, Once}, }; use alloy::{primitives::U256, rpc::types::beacon::BlsPublicKey}; use cb_common::{ - config::{PbsConfig, PbsModuleConfig, RelayConfig}, + config::{ + PbsConfig, PbsModuleConfig, RelayConfig, SignerConfig, SignerType, StartSignerConfig, + SIGNER_IMAGE_DEFAULT, + }, pbs::{RelayClient, RelayEntry}, - types::Chain, + signer::{ + SignerLoader, DEFAULT_JWT_AUTH_FAIL_LIMIT, DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, + DEFAULT_SIGNER_PORT, + }, + types::{Chain, ModuleId}, + utils::default_host, }; use eyre::Result; @@ -91,3 +100,34 @@ pub fn to_pbs_config( muxes: None, } } + +pub fn get_signer_config(loader: SignerLoader) -> SignerConfig { + SignerConfig { + host: default_host(), + port: DEFAULT_SIGNER_PORT, + docker_image: SIGNER_IMAGE_DEFAULT.to_string(), + jwt_auth_fail_limit: DEFAULT_JWT_AUTH_FAIL_LIMIT, + jwt_auth_fail_timeout_seconds: DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, + inner: SignerType::Local { loader, store: None }, + } +} + +pub fn get_start_signer_config( + signer_config: SignerConfig, + chain: Chain, + jwts: HashMap, +) -> StartSignerConfig { + match signer_config.inner { + SignerType::Local { loader, .. 
} => StartSignerConfig { + chain, + loader: Some(loader), + store: None, + endpoint: SocketAddr::new(signer_config.host.into(), signer_config.port), + jwts, + jwt_auth_fail_limit: signer_config.jwt_auth_fail_limit, + jwt_auth_fail_timeout_seconds: signer_config.jwt_auth_fail_timeout_seconds, + dirk: None, + }, + _ => panic!("Only local signers are supported in tests"), + } +} diff --git a/tests/tests/pbs_get_header.rs b/tests/tests/pbs_get_header.rs index 422a71a3..747d460c 100644 --- a/tests/tests/pbs_get_header.rs +++ b/tests/tests/pbs_get_header.rs @@ -23,7 +23,7 @@ use tree_hash::TreeHash; async fn test_get_header() -> Result<()> { setup_test_env(); let signer = random_secret(); - let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()).into(); + let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()); let chain = Chain::Holesky; let pbs_port = 3200; diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs new file mode 100644 index 00000000..989cdb61 --- /dev/null +++ b/tests/tests/signer_jwt_auth.rs @@ -0,0 +1,47 @@ +use std::{collections::HashMap, fs, time::Duration}; + +use cb_common::{ + signer::{SignerLoader, ValidatorKeysFormat}, + types::{Chain, ModuleId}, +}; +use cb_signer::service::SigningService; +use cb_tests::utils::{get_signer_config, get_start_signer_config, setup_test_env}; +use eyre::Result; +use tempfile::tempdir; + +#[tokio::test] +async fn test_signer_jwt_auth_success() -> Result<()> { + setup_test_env(); + let chain = Chain::Hoodi; + + // Mock JWT secrets + let mut jwts = HashMap::new(); + jwts.insert(ModuleId("test-module".to_string()), "test-jwt-secret".to_string()); + + // Create a temp folder and key structure + let test_folder = tempdir()?; + let test_path = test_folder.path(); + let keys_path = test_path.join("keys"); + let secrets_path = test_path.join("secrets"); + fs::create_dir_all(&keys_path)?; + fs::create_dir_all(&secrets_path)?; + + // Create a signer config + let loader = SignerLoader::ValidatorsDir { + keys_path, + secrets_path, + format: ValidatorKeysFormat::Lighthouse, + }; + let config = get_signer_config(loader); + let start_config = get_start_signer_config(config, chain, jwts); + + // Run the Signer + tokio::spawn(SigningService::run(start_config)); + + // leave some time to start servers + tokio::time::sleep(Duration::from_millis(100)).await; + + // TODO: simple client to test the JWT auth endpoint + + Ok(()) +} From dc73c6215d604cd6f0165801ac7213b462953ebd Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 00:52:33 -0400 Subject: [PATCH 25/35] Finished a basic signer module unit test --- tests/tests/signer_jwt_auth.rs | 62 +++++++++++++++++++++++++--------- 1 file changed, 46 insertions(+), 16 deletions(-) diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index 989cdb61..0e9e97eb 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -1,13 +1,19 @@ -use std::{collections::HashMap, fs, time::Duration}; +use std::{collections::HashMap, time::Duration}; +use alloy::{hex, primitives::FixedBytes}; use cb_common::{ + commit::{constants::GET_PUBKEYS_PATH, request::GetPubkeysResponse}, signer::{SignerLoader, ValidatorKeysFormat}, types::{Chain, ModuleId}, + utils::create_jwt, }; use cb_signer::service::SigningService; use cb_tests::utils::{get_signer_config, get_start_signer_config, setup_test_env}; use eyre::Result; -use tempfile::tempdir; +use tracing::info; + +const JWT_MODULE: &str = "test-module"; +const JWT_SECRET: &str = 
"test-jwt-secret"; #[tokio::test] async fn test_signer_jwt_auth_success() -> Result<()> { @@ -15,33 +21,57 @@ async fn test_signer_jwt_auth_success() -> Result<()> { let chain = Chain::Hoodi; // Mock JWT secrets + let module_id = ModuleId(JWT_MODULE.to_string()); let mut jwts = HashMap::new(); - jwts.insert(ModuleId("test-module".to_string()), "test-jwt-secret".to_string()); - - // Create a temp folder and key structure - let test_folder = tempdir()?; - let test_path = test_folder.path(); - let keys_path = test_path.join("keys"); - let secrets_path = test_path.join("secrets"); - fs::create_dir_all(&keys_path)?; - fs::create_dir_all(&secrets_path)?; + jwts.insert(module_id.clone(), JWT_SECRET.to_string()); // Create a signer config let loader = SignerLoader::ValidatorsDir { - keys_path, - secrets_path, + keys_path: "data/keystores/keys".into(), + secrets_path: "data/keystores/secrets".into(), format: ValidatorKeysFormat::Lighthouse, }; let config = get_signer_config(loader); + let host = config.host; + let port = config.port; let start_config = get_start_signer_config(config, chain, jwts); // Run the Signer - tokio::spawn(SigningService::run(start_config)); + let server_handle = tokio::spawn(SigningService::run(start_config)); - // leave some time to start servers + // Make sure the server is running tokio::time::sleep(Duration::from_millis(100)).await; + if server_handle.is_finished() { + return Err(eyre::eyre!( + "Signer service failed to start: {}", + server_handle.await.unwrap_err() + )); + } + + // Create a JWT header + let jwt = create_jwt(&module_id, JWT_SECRET)?; + + // Run a pubkeys request + let client = reqwest::Client::new(); + let url = format!("http://{}:{}{}", host, port, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(jwt).send().await?; + assert!(response.status().is_success(), "Failed to authenticate with JWT"); + let pubkey_json = response.json::().await?; - // TODO: simple client to test the JWT auth endpoint + // Verify the expected pubkeys are returned + assert_eq!(pubkey_json.keys.len(), 2); + let expected_pubkeys = vec![ + FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")), + FixedBytes::new(hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9")), + ]; + for expected in expected_pubkeys { + assert!( + pubkey_json.keys.iter().any(|k| k.consensus == expected), + "Expected pubkey not found: {:?}", + expected + ); + info!("Server returned expected pubkey: {:?}", expected); + } Ok(()) } From 6c3d9670f4ff7d9e6fa7e5b8b497deea01043347 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 01:15:44 -0400 Subject: [PATCH 26/35] Added a JWT failure unit test --- tests/tests/signer_jwt_auth.rs | 49 ++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index 0e9e97eb..fd111814 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -75,3 +75,52 @@ async fn test_signer_jwt_auth_success() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn test_signer_jwt_auth_fail() -> Result<()> { + setup_test_env(); + let chain = Chain::Hoodi; + + // Mock JWT secrets + let module_id = ModuleId(JWT_MODULE.to_string()); + let mut jwts = HashMap::new(); + jwts.insert(module_id.clone(), JWT_SECRET.to_string()); + + // Create a signer config + let loader = SignerLoader::ValidatorsDir { + keys_path: "data/keystores/keys".into(), + 
secrets_path: "data/keystores/secrets".into(), + format: ValidatorKeysFormat::Lighthouse, + }; + let config = get_signer_config(loader); + let host = config.host; + let port = config.port; + let start_config = get_start_signer_config(config, chain, jwts); + + // Run the Signer + let server_handle = tokio::spawn(SigningService::run(start_config)); + + // Make sure the server is running + tokio::time::sleep(Duration::from_millis(100)).await; + if server_handle.is_finished() { + return Err(eyre::eyre!( + "Signer service failed to start: {}", + server_handle.await.unwrap_err() + )); + } + + // Create a JWT header + let jwt = create_jwt(&module_id, "incorrect secret")?; + + // Run a pubkeys request + let client = reqwest::Client::new(); + let url = format!("http://{}:{}{}", host, port, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(jwt).send().await?; + assert!(response.status().is_client_error(), "Failed to authenticate with JWT"); + info!( + "Server returned expected error code {} for invalid JWT: {}", + response.status(), + response.text().await.unwrap_or_else(|_| "No response body".to_string()) + ); + Ok(()) +} From 6464638a443b63e58ed3cfef381210aa13f963b2 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 02:05:28 -0400 Subject: [PATCH 27/35] Added a rate limit test and cleaned up a bit --- tests/tests/signer_jwt_auth.rs | 145 +++++++++++++++++++-------------- 1 file changed, 82 insertions(+), 63 deletions(-) diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index fd111814..961afb3e 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, time::Duration}; use alloy::{hex, primitives::FixedBytes}; use cb_common::{ commit::{constants::GET_PUBKEYS_PATH, request::GetPubkeysResponse}, + config::StartSignerConfig, signer::{SignerLoader, ValidatorKeysFormat}, types::{Chain, ModuleId}, utils::create_jwt, @@ -10,6 +11,7 @@ use cb_common::{ use cb_signer::service::SigningService; use cb_tests::utils::{get_signer_config, get_start_signer_config, setup_test_env}; use eyre::Result; +use reqwest::{Response, StatusCode}; use tracing::info; const JWT_MODULE: &str = "test-module"; @@ -18,66 +20,75 @@ const JWT_SECRET: &str = "test-jwt-secret"; #[tokio::test] async fn test_signer_jwt_auth_success() -> Result<()> { setup_test_env(); - let chain = Chain::Hoodi; + let module_id = ModuleId(JWT_MODULE.to_string()); + let start_config = start_server().await?; - // Mock JWT secrets + // Run a pubkeys request + let jwt = create_jwt(&module_id, JWT_SECRET)?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(&jwt).send().await?; + + // Verify the expected pubkeys are returned + verify_pubkeys(response).await?; + + Ok(()) +} + +#[tokio::test] +async fn test_signer_jwt_auth_fail() -> Result<()> { + setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let mut jwts = HashMap::new(); - jwts.insert(module_id.clone(), JWT_SECRET.to_string()); + let start_config = start_server().await?; - // Create a signer config - let loader = SignerLoader::ValidatorsDir { - keys_path: "data/keystores/keys".into(), - secrets_path: "data/keystores/secrets".into(), - format: ValidatorKeysFormat::Lighthouse, - }; - let config = get_signer_config(loader); - let host = config.host; - let port = config.port; - let start_config = get_start_signer_config(config, chain, jwts); + // Run a 
pubkeys request - this should fail due to invalid JWT + let jwt = create_jwt(&module_id, "incorrect secret")?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(&jwt).send().await?; + assert!(response.status() == StatusCode::UNAUTHORIZED); + info!( + "Server returned expected error code {} for invalid JWT: {}", + response.status(), + response.text().await.unwrap_or_else(|_| "No response body".to_string()) + ); + Ok(()) +} - // Run the Signer - let server_handle = tokio::spawn(SigningService::run(start_config)); +#[tokio::test] +async fn test_signer_jwt_rate_limit() -> Result<()> { + setup_test_env(); + let module_id = ModuleId(JWT_MODULE.to_string()); + let start_config = start_server().await?; - // Make sure the server is running - tokio::time::sleep(Duration::from_millis(100)).await; - if server_handle.is_finished() { - return Err(eyre::eyre!( - "Signer service failed to start: {}", - server_handle.await.unwrap_err() - )); + // Run as many pubkeys requests as the fail limit + let jwt = create_jwt(&module_id, "incorrect secret")?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + for _ in 0..start_config.jwt_auth_fail_limit { + let response = client.get(&url).bearer_auth(&jwt).send().await?; + assert!(response.status() == StatusCode::UNAUTHORIZED); } - // Create a JWT header + // Run another request - this should fail due to rate limiting now let jwt = create_jwt(&module_id, JWT_SECRET)?; + let response = client.get(&url).bearer_auth(&jwt).send().await?; + assert!(response.status() == StatusCode::TOO_MANY_REQUESTS); - // Run a pubkeys request - let client = reqwest::Client::new(); - let url = format!("http://{}:{}{}", host, port, GET_PUBKEYS_PATH); - let response = client.get(&url).bearer_auth(jwt).send().await?; - assert!(response.status().is_success(), "Failed to authenticate with JWT"); - let pubkey_json = response.json::().await?; + // Wait for the rate limit timeout + tokio::time::sleep(Duration::from_secs(start_config.jwt_auth_fail_timeout_seconds as u64)) + .await; - // Verify the expected pubkeys are returned - assert_eq!(pubkey_json.keys.len(), 2); - let expected_pubkeys = vec![ - FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")), - FixedBytes::new(hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9")), - ]; - for expected in expected_pubkeys { - assert!( - pubkey_json.keys.iter().any(|k| k.consensus == expected), - "Expected pubkey not found: {:?}", - expected - ); - info!("Server returned expected pubkey: {:?}", expected); - } + // Now the next request should succeed + let response = client.get(&url).bearer_auth(&jwt).send().await?; + verify_pubkeys(response).await?; Ok(()) } -#[tokio::test] -async fn test_signer_jwt_auth_fail() -> Result<()> { +// Starts the signer moduler server on a separate task and returns its +// configuration +async fn start_server() -> Result { setup_test_env(); let chain = Chain::Hoodi; @@ -92,13 +103,13 @@ async fn test_signer_jwt_auth_fail() -> Result<()> { secrets_path: "data/keystores/secrets".into(), format: ValidatorKeysFormat::Lighthouse, }; - let config = get_signer_config(loader); - let host = config.host; - let port = config.port; + let mut config = get_signer_config(loader); + config.jwt_auth_fail_limit = 3; // Set a low fail limit for testing 
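+    // With these values, the 3rd bad JWT from a client trips the rate limiter,
+    // and further requests from that IP should receive 429s until ~3 seconds elapse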
+ config.jwt_auth_fail_timeout_seconds = 3; // Set a short timeout for testing let start_config = get_start_signer_config(config, chain, jwts); // Run the Signer - let server_handle = tokio::spawn(SigningService::run(start_config)); + let server_handle = tokio::spawn(SigningService::run(start_config.clone())); // Make sure the server is running tokio::time::sleep(Duration::from_millis(100)).await; @@ -108,19 +119,27 @@ async fn test_signer_jwt_auth_fail() -> Result<()> { server_handle.await.unwrap_err() )); } + Ok(start_config) +} - // Create a JWT header - let jwt = create_jwt(&module_id, "incorrect secret")?; - - // Run a pubkeys request - let client = reqwest::Client::new(); - let url = format!("http://{}:{}{}", host, port, GET_PUBKEYS_PATH); - let response = client.get(&url).bearer_auth(jwt).send().await?; - assert!(response.status().is_client_error(), "Failed to authenticate with JWT"); - info!( - "Server returned expected error code {} for invalid JWT: {}", - response.status(), - response.text().await.unwrap_or_else(|_| "No response body".to_string()) - ); +// Verifies that the pubkeys returned by the server match the pubkeys in the +// test data +async fn verify_pubkeys(response: Response) -> Result<()> { + // Verify the expected pubkeys are returned + assert!(response.status() == StatusCode::OK); + let pubkey_json = response.json::().await?; + assert_eq!(pubkey_json.keys.len(), 2); + let expected_pubkeys = vec![ + FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")), + FixedBytes::new(hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9")), + ]; + for expected in expected_pubkeys { + assert!( + pubkey_json.keys.iter().any(|k| k.consensus == expected), + "Expected pubkey not found: {:?}", + expected + ); + info!("Server returned expected pubkey: {:?}", expected); + } Ok(()) } From 0313f18c27880a85d5c9af7698884f27b7cf895e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 03:30:34 -0400 Subject: [PATCH 28/35] Added unique ports to unit tests for parallel execution --- tests/tests/signer_jwt_auth.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index 961afb3e..90a0365f 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -21,7 +21,7 @@ const JWT_SECRET: &str = "test-jwt-secret"; async fn test_signer_jwt_auth_success() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let start_config = start_server().await?; + let start_config = start_server(20100).await?; // Run a pubkeys request let jwt = create_jwt(&module_id, JWT_SECRET)?; @@ -39,7 +39,7 @@ async fn test_signer_jwt_auth_success() -> Result<()> { async fn test_signer_jwt_auth_fail() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let start_config = start_server().await?; + let start_config = start_server(20200).await?; // Run a pubkeys request - this should fail due to invalid JWT let jwt = create_jwt(&module_id, "incorrect secret")?; @@ -59,7 +59,7 @@ async fn test_signer_jwt_auth_fail() -> Result<()> { async fn test_signer_jwt_rate_limit() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let start_config = start_server().await?; + let start_config = start_server(20300).await?; // Run as many pubkeys requests as the fail limit let jwt = create_jwt(&module_id, "incorrect 
secret")?; @@ -88,7 +88,7 @@ async fn test_signer_jwt_rate_limit() -> Result<()> { // Starts the signer moduler server on a separate task and returns its // configuration -async fn start_server() -> Result { +async fn start_server(port: u16) -> Result { setup_test_env(); let chain = Chain::Hoodi; @@ -104,6 +104,7 @@ async fn start_server() -> Result { format: ValidatorKeysFormat::Lighthouse, }; let mut config = get_signer_config(loader); + config.port = port; config.jwt_auth_fail_limit = 3; // Set a low fail limit for testing config.jwt_auth_fail_timeout_seconds = 3; // Set a short timeout for testing let start_config = get_start_signer_config(config, chain, jwts); From 346eea4c0ee7c6e7e53ec1f6c950e3289be214dd Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 16:36:11 -0400 Subject: [PATCH 29/35] Cleaned up the build Dockerfile and removed an extra dependency layer --- provisioning/build.Dockerfile | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/provisioning/build.Dockerfile b/provisioning/build.Dockerfile index 34ad27a5..43713cc5 100644 --- a/provisioning/build.Dockerfile +++ b/provisioning/build.Dockerfile @@ -1,7 +1,10 @@ # This will be the main build image -FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +FROM --platform=${BUILDPLATFORM} rust:1.83-slim-bookworm AS chef ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE +ENV CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse WORKDIR /app +RUN cargo install cargo-chef --locked && \ + rm -rf $CARGO_HOME/registry/ FROM --platform=${BUILDPLATFORM} chef AS planner ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE @@ -20,8 +23,8 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ rustup target add aarch64-unknown-linux-gnu && \ dpkg --add-architecture arm64 && \ apt update && \ - apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + apt install -y gcc-aarch64-linux-gnu && \ + echo '#!/bin/sh' > ${BUILD_VAR_SCRIPT} && \ echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ @@ -35,8 +38,8 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ rustup target add x86_64-unknown-linux-gnu && \ dpkg --add-architecture amd64 && \ apt update && \ - apt install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + apt install -y gcc-x86-64-linux-gnu && \ + echo '#!/bin/sh' > ${BUILD_VAR_SCRIPT} && \ echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ @@ -49,12 +52,14 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # Run cook to prep the build RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + chmod +x ${BUILD_VAR_SCRIPT} && \ . 
${BUILD_VAR_SCRIPT} && \ echo "Cross-compilation environment set up for ${TARGET}"; \ else \ echo "No cross-compilation needed"; \ fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ + apt update && \ + apt install -y git libssl-dev:${TARGETARCH} zlib1g-dev:${TARGETARCH} pkg-config && \ cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json # Get the latest Protoc since the one in the Debian repo is incredibly old From 7b20d2f885efa8591d834d1deebb7b550d89683d Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 29 May 2025 05:03:25 -0400 Subject: [PATCH 30/35] Ported the build script over to the justfile --- build-linux.sh | 144 ------------------------ docs/docs/get_started/building.md | 28 ++--- justfile | 175 +++++++++++++++++++++++++++++- provisioning/pbs.Dockerfile | 2 +- provisioning/signer.Dockerfile | 2 +- 5 files changed, 185 insertions(+), 166 deletions(-) delete mode 100755 build-linux.sh diff --git a/build-linux.sh b/build-linux.sh deleted file mode 100755 index a7266bd9..00000000 --- a/build-linux.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/bin/bash - -# This script will build the Commit-Boost applications and modules for local Linux development. - -# ================= -# === Functions === -# ================= - -# Print a failure message to stderr and exit -fail() { - MESSAGE=$1 - RED='\033[0;31m' - RESET='\033[;0m' - >&2 echo -e "\n${RED}**ERROR**\n$MESSAGE${RESET}\n" - exit 1 -} - - -# Builds the CLI binaries for Linux -# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static -build_cli() { - echo "Building CLI binaries..." - docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-cli . || fail "Error building CLI." - echo "done!" - - # Flatten the folder structure for easier referencing - mv build/$VERSION/linux_amd64/commit-boost-cli build/$VERSION/commit-boost-cli-linux-amd64 - mv build/$VERSION/linux_arm64/commit-boost-cli build/$VERSION/commit-boost-cli-linux-arm64 - - # Clean up the empty directories - rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 - echo "done!" -} - - -# Builds the PBS module binaries for Linux and the Docker image(s) -# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static -build_pbs() { - echo "Building PBS binaries..." - docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-pbs . || fail "Error building PBS binaries." - echo "done!" - - # Flatten the folder structure for easier referencing - mv build/$VERSION/linux_amd64/commit-boost-pbs build/$VERSION/commit-boost-pbs-linux-amd64 - mv build/$VERSION/linux_arm64/commit-boost-pbs build/$VERSION/commit-boost-pbs-linux-arm64 - - # Clean up the empty directories - rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 - - echo "Building PBS Docker image..." - # If uploading, make and push a manifest - if [ "$LOCAL_UPLOAD" = true ]; then - if [ -z "$LOCAL_DOCKER_REGISTRY" ]; then - fail "LOCAL_DOCKER_REGISTRY must be set to upload to a local registry." - fi - docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/$VERSION -t $LOCAL_DOCKER_REGISTRY/commit-boost/pbs:$VERSION -f provisioning/pbs.Dockerfile --push . || fail "Error building PBS image." 
- else - docker buildx build --rm --load --build-arg BINARIES_PATH=build/$VERSION -t commit-boost/pbs:$VERSION -f provisioning/pbs.Dockerfile . || fail "Error building PBS image." - fi - echo "done!" -} - - -# Builds the Signer module binaries for Linux and the Docker image(s) -# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static -build_signer() { - echo "Building Signer binaries..." - docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-signer . || fail "Error building Signer binaries." - echo "done!" - - # Flatten the folder structure for easier referencing - mv build/$VERSION/linux_amd64/commit-boost-signer build/$VERSION/commit-boost-signer-linux-amd64 - mv build/$VERSION/linux_arm64/commit-boost-signer build/$VERSION/commit-boost-signer-linux-arm64 - - # Clean up the empty directories - rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 - - echo "Building Signer Docker image..." - # If uploading, make and push a manifest - if [ "$LOCAL_UPLOAD" = true ]; then - if [ -z "$LOCAL_DOCKER_REGISTRY" ]; then - fail "LOCAL_DOCKER_REGISTRY must be set to upload to a local registry." - fi - docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/$VERSION -t $LOCAL_DOCKER_REGISTRY/commit-boost/signer:$VERSION -f provisioning/signer.Dockerfile --push . || fail "Error building Signer image." - else - docker buildx build --rm --load --build-arg BINARIES_PATH=build/$VERSION -t commit-boost/signer:$VERSION -f provisioning/signer.Dockerfile . || fail "Error building Signer image." - fi - echo "done!" -} - - -# Print usage -usage() { - echo "Usage: build.sh [options] -v " - echo "This script assumes it is in the commit-boost-client repository directory." - echo "Options:" - echo $'\t-a\tBuild all of the artifacts (CLI, PBS, and Signer, along with Docker images)' - echo $'\t-c\tBuild the Commit-Boost CLI binaries' - echo $'\t-p\tBuild the PBS module binary and its Docker container' - echo $'\t-s\tBuild the Signer module binary and its Docker container' - echo $'\t-o\tWhen passed with a build, upload the resulting image tags to a local Docker registry specified in $LOCAL_DOCKER_REGISTRY' - exit 0 -} - - -# ================= -# === Main Body === -# ================= - -# Parse arguments -while getopts "acpsov:" FLAG; do - case "$FLAG" in - a) CLI=true PBS=true SIGNER=true ;; - c) CLI=true ;; - p) PBS=true ;; - s) SIGNER=true ;; - o) LOCAL_UPLOAD=true ;; - v) VERSION="$OPTARG" ;; - *) usage ;; - esac -done -if [ -z "$VERSION" ]; then - usage -fi - -# Cleanup old artifacts -rm -rf build/$VERSION/* -mkdir -p build/$VERSION - -# Make a multiarch builder, ignore if it's already there -docker buildx create --name multiarch-builder --driver docker-container --use > /dev/null 2>&1 -# NOTE: if using a local repo with a private CA, you will have to follow these steps to add the CA to the builder: -# https://stackoverflow.com/a/73585243 - -# Build the artifacts -if [ "$CLI" = true ]; then - build_cli -fi -if [ "$PBS" = true ]; then - build_pbs -fi -if [ "$SIGNER" = true ]; then - build_signer -fi diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md index d38b447f..edf795b2 100644 --- a/docs/docs/get_started/building.md +++ b/docs/docs/get_started/building.md @@ -5,31 +5,27 @@ Commit-Boost's components are all written in [Rust](https://www.rust-lang.org/). 
## Building via the Docker Builder -For convenience, Commit-Boost has Dockerized the build environment for Linux `x64` and `arm64` platforms. All of the prerequisites, cross-compilation tooling, and configuration are handled by the builder image. If you would like to build the CLI, PBS module, or Signer binaries and Docker images from source, you are welcome to use the Docker builder process. +For convenience, Commit-Boost has Dockerized the build environment for Linux `x64` and `arm64` platforms. It utilizes Docker's powerful [buildx](https://docs.docker.com/reference/cli/docker/buildx/) system. All of the prerequisites, cross-compilation tooling, and configuration are handled by the builder image. If you would like to build the CLI, PBS module, or Signer binaries and Docker images from source, you are welcome to use the Docker builder process. To use the builder, you will need to have [Docker Engine](https://docs.docker.com/engine/install/) installed on your system. Please follow the instructions to install it first. :::note -The build script assumes that you've added your user account to the `docker` group with the Linux [post-install steps](https://docs.docker.com/engine/install/linux-postinstall/). If you haven't, then you'll need to run the build script below as `root` or modify it so each call to `docker` within it is run as the root user (e.g., with `sudo`). +The build system assumes that you've added your user account to the `docker` group with the Linux [post-install steps](https://docs.docker.com/engine/install/linux-postinstall/). If you haven't, then you'll need to run the build script below as `root` or modify it so each call to `docker` within it is run as the root user (e.g., with `sudo`). ::: -We provide a build script called `build-linux.sh` to automate the process: +The Docker builder is built into the project's `justfile` which is used to invoke many facets of Commit Boost development. To use it, you'll need to install [Just](https://github.com/casey/just) on your system. -``` -$ ./build-linux.sh -Usage: build.sh [options] -v -This script assumes it is in the commit-boost-client repository directory. -Options: - -a Build all of the artifacts (CLI, PBS, and Signer, along with Docker images) - -c Build the Commit-Boost CLI binaries - -p Build the PBS module binary and its Docker container - -s Build the Signer module binary and its Docker container - -o When passed with a build, upload the resulting image tags to a local Docker registry specified in $LOCAL_DOCKER_REGISTRY -``` +Use `just --list` to show all of the actions - there are many. The `justfile` provides granular actions, called "recipes", for building just the binaries of a specific crate (such as the CLI, `pbs`, or `signer`), as well as actions to build the Docker images for the PBS and Signer modules. + +Below is a brief summary of the relevant ones for building the Commit-Boost artifacts: + +- `build-all ` will build the `commit-boost-cli`, `commit-boost-pbs`, and `commit-boost-signer` binaries for your local system architecture. It will also create Docker images called `commit-boost/pbs:` and `commit-boost/signer:` and load them into your local Docker registry for use. +- `build-cli-bin `, `build-pbs-bin `, and `build-signer-bin ` can be used to create the `commit-boost-cli`, `commit-boost-pbs`, and `commit-boost-signer` binaries, respectively. +- `build-pbs-img ` and `build-signer-img ` can be used to create the Docker images for the PBS and Signer modules, respectively. 
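For example, a typical local build might look like this (the version tag `v0.1.0` and the registry address are only illustrative):

```
just build-all v0.1.0                              # CLI, PBS and Signer binaries + Docker images
just build-pbs-bin v0.1.0                          # only the PBS binary
just build-pbs-img v0.1.0                          # only the PBS Docker image (binary must already be built)
just build-pbs-multiarch v0.1.0 192.168.1.10:5000  # multiarch PBS image pushed to a custom registry
```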
-The script utilizes Docker's [buildx](https://docs.docker.com/reference/cli/docker/buildx/) system to both create a multiarch-capable builder and cross-compile for both Linux architectures. You are free to modify it to produce only the artifacts relevant to you if so desired. +The `version` provided will be used to house the output binaries in `./build/`, and act as the version tag for the Docker images when they're added to your local system or uploaded to your local Docker repository. -The `version` provided will be used to house the output binaries in `./build/$VERSION`, and act as the version tag for the Docker images when they're added to your local system or uploaded to your local Docker repository. +If you're interested in building the binaries and/or Docker images for multiple architectures (currently Linux `amd64` and `arm64`), use the variants of those recipes that have the `-multiarch` suffix. Note that building a multiarch Docker image manifest will require the use of a [custom Docker registry](https://www.digitalocean.com/community/tutorials/how-to-set-up-a-private-docker-registry-on-ubuntu-20-04), as the local registry built into Docker does not have multiarch manifest support. ## Building Manually diff --git a/justfile b/justfile index d13e76ae..ac1314fc 100644 --- a/justfile +++ b/justfile @@ -12,16 +12,183 @@ fmt-check: clippy: cargo +{{toolchain}} clippy --all-features --no-deps -- -D warnings -docker-build-pbs: - docker build -t commitboost_pbs_default . -f ./provisioning/pbs.Dockerfile +# =================================== +# === Build Commands for Services === +# =================================== -docker-build-signer: - docker build -t commitboost_signer . -f ./provisioning/signer.Dockerfile +[doc(""" + Builds the commit-boost-cli binary to './build/'. +""")] +build-cli version: \ + (_docker-build-binary version "cli") + +[doc(""" + Builds amd64 and arm64 binaries for the commit-boost-cli crate to './build//', where '' is + the OS / arch platform of the binary (linux_amd64 and linux_arm64). +""")] +build-cli-multiarch version: \ + (_docker-build-binary-multiarch version "cli") + +[doc(""" + Builds the commit-boost-pbs binary to './build/'. +""")] +build-pbs-bin version: \ + (_docker-build-binary version "pbs") + +[doc(""" + Creates a Docker image named 'commit-boost/pbs:' and loads it to the local Docker repository. + Requires the binary to be built first, but this command won't build it automatically if you just need to build the + Docker image without recompiling the binary. +""")] +build-pbs-img version: \ + (_docker-build-image version "pbs") + +[doc(""" + Builds the commit-boost-pbs binary to './build/' and creates a Docker image named 'commit-boost/pbs:'. +""")] +build-pbs version: \ + (build-pbs-bin version) \ + (build-pbs-img version) + +[doc(""" + Builds amd64 and arm64 binaries for the commit-boost-pbs crate to './build//', where '' is the + OS / arch platform of the binary (linux_amd64 and linux_arm64). + Used when creating the pbs Docker image. +""")] +build-pbs-bin-multiarch version: \ + (_docker-build-binary-multiarch version "pbs") + +[doc(""" + Creates a multiarch Docker image manifest named 'commit-boost/pbs:' and pushes it to a custom Docker registry + (such as '192.168.1.10:5000'). + Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. 
+""")] +build-pbs-img-multiarch version local-docker-registry: \ + (_docker-build-image-multiarch version "pbs" local-docker-registry) + +[doc(""" + Builds amd64 and arm64 binaries for the commit-boost-pbs crate to './build//', where '' is the + OS / arch platform of the binary (linux_amd64 and linux_arm64). + Creates a multiarch Docker image manifest named 'commit-boost/pbs:' and pushes it to a custom Docker registry + (such as '192.168.1.10:5000'). + Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. +""")] +build-pbs-multiarch version local-docker-registry: \ + (build-pbs-bin-multiarch version) \ + (build-pbs-img-multiarch version local-docker-registry) + +[doc(""" + Builds the commit-boost-signer binary to './build/'. +""")] +build-signer-bin version: \ + (_docker-build-binary version "signer") + +[doc(""" + Creates a Docker image named 'commit-boost/signer:' and loads it to the local Docker repository. + Requires the binary to be built first, but this command won't build it automatically if you just need to build the + Docker image without recompiling the binary. +""")] +build-signer-img version: \ + (_docker-build-image version "signer") + +[doc(""" + Builds the commit-boost-signer binary to './build/' and creates a Docker image named 'commit-boost/signer:'. +""")] +build-signer version: \ + (build-signer-bin version) \ + (build-signer-img version) + +[doc(""" + Builds amd64 and arm64 binaries for the commit-boost-signer crate to './build//', where '' is + the OS / arch platform of the binary (linux_amd64 and linux_arm64). + Used when creating the signer Docker image. +""")] +build-signer-bin-multiarch version: \ + (_docker-build-binary-multiarch version "signer") + +[doc(""" + Creates a multiarch Docker image manifest named 'commit-boost/signer:' and pushes it to a custom Docker registry + (such as '192.168.1.10:5000'). + Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. +""")] +build-signer-img-multiarch version local-docker-registry: \ + (_docker-build-image-multiarch version "signer" local-docker-registry) + +[doc(""" + Builds amd64 and arm64 binaries for the commit-boost-signer crate to './build//', where '' is + the OS / arch platform of the binary (linux_amd64 and linux_arm64). + Creates a multiarch Docker image manifest named 'commit-boost/signer:' and pushes it to a custom Docker registry + (such as '192.168.1.10:5000'). + Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. +""")] +build-signer-multiarch version local-docker-registry: \ + (build-signer-bin-multiarch version) \ + (build-signer-img-multiarch version local-docker-registry) + +[doc(""" + Builds the CLI, PBS, and Signer binaries and Docker images for the specified version. + The binaries will be placed in './build/'. + The Docker images will be named 'commit-boost/cli:', 'commit-boost/pbs:', and + 'commit-boost/signer:'. +""")] +build-all version: \ + (build-cli version) \ + (build-pbs version) \ + (build-signer version) + +[doc(""" + Builds amd64 and arm64 flavors of the CLI, PBS, and Signer binaries and Docker images for the specified version. + The binaries will be placed in './build//', where '' is the + OS / arch platform of the binary (linux_amd64 and linux_arm64). + Also creates multiarch Docker image manifests for each crate and pushes them to a custom Docker registry + (such as '192.168.1.10:5000'). 
+ Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. +""")] +build-all-multiarch version local-docker-registry: \ + (build-cli-multiarch version) \ + (build-pbs-multiarch version local-docker-registry) \ + (build-signer-multiarch version local-docker-registry) + +# =============================== +# === Builder Implementations === +# =============================== + +# Creates a Docker buildx builder if it doesn't already exist +_create-docker-builder: + docker buildx create --name multiarch-builder --driver docker-container --use > /dev/null 2>&1 || true + +# Builds a binary for a specific crate and version +_docker-build-binary version crate: _create-docker-builder + export PLATFORM=$(docker buildx inspect --bootstrap | awk -F': ' '/Platforms/ {print $2}' | cut -d',' -f1 | xargs | tr '/' '_'); \ + docker buildx build --rm --platform=local -f provisioning/build.Dockerfile --output "build/{{version}}/$PLATFORM" --target output --build-arg TARGET_CRATE=commit-boost-{{crate}} . + +# Builds a Docker image for a specific crate and version +_docker-build-image version crate: _create-docker-builder + docker buildx build --rm --load --build-arg BINARIES_PATH=build/{{version}} -t commit-boost/{{crate}}:{{version}} -f provisioning/{{crate}}.Dockerfile . + +# Builds multiple binaries (for Linux amd64 and arm64 architectures) for a specific crate and version +_docker-build-binary-multiarch version crate: _create-docker-builder + docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/{{version}} --target output --build-arg TARGET_CRATE=commit-boost-{{crate}} . + +# Builds a multi-architecture (Linux amd64 and arm64) Docker manifest for a specific crate and version. +# Uploads to the custom Docker registry (such as '192.168.1.10:5000') instead of a public registry like GHCR or Docker Hub. +_docker-build-image-multiarch version crate local-docker-registry: _create-docker-builder + docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/{{version}} -t {{local-docker-registry}}/commit-boost/{{crate}}:{{version}} -f provisioning/{{crate}}.Dockerfile --push . + +# ================= +# === Utilities === +# ================= docker-build-test-modules: docker build -t test_da_commit . -f examples/da_commit/Dockerfile docker build -t test_builder_log . -f examples/builder_log/Dockerfile docker build -t test_status_api . -f examples/status_api/Dockerfile +# Cleans the build directory, removing all built binaries. +# Docker images are not removed by this command. +clean: + rm -rf build + +# Runs the suite of tests for all commit-boost crates. 
test: cargo test --all-features \ No newline at end of file diff --git a/provisioning/pbs.Dockerfile b/provisioning/pbs.Dockerfile index 9eb72702..6b9496ec 100644 --- a/provisioning/pbs.Dockerfile +++ b/provisioning/pbs.Dockerfile @@ -1,6 +1,6 @@ FROM debian:bookworm-slim ARG BINARIES_PATH TARGETOS TARGETARCH -COPY ${BINARIES_PATH}/commit-boost-pbs-${TARGETOS}-${TARGETARCH} /usr/local/bin/commit-boost-pbs +COPY ${BINARIES_PATH}/${TARGETOS}_${TARGETARCH}/commit-boost-pbs /usr/local/bin/commit-boost-pbs RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 05679762..5ea619b2 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,6 +1,6 @@ FROM debian:bookworm-slim ARG BINARIES_PATH TARGETOS TARGETARCH -COPY ${BINARIES_PATH}/commit-boost-signer-${TARGETOS}-${TARGETARCH} /usr/local/bin/commit-boost-signer +COPY ${BINARIES_PATH}/${TARGETOS}_${TARGETARCH}/commit-boost-signer /usr/local/bin/commit-boost-signer RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ From ca9f4a1997103e81427d3c9ca04a54317ce9fb2b Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 29 May 2025 16:08:50 -0400 Subject: [PATCH 31/35] Added a justfile recipe for installing protoc --- docs/docs/get_started/building.md | 19 ++++++------------- justfile | 3 +++ provisioning/protoc.sh | 11 +++++++---- 3 files changed, 16 insertions(+), 17 deletions(-) diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md index edf795b2..a00b36cf 100644 --- a/docs/docs/get_started/building.md +++ b/docs/docs/get_started/building.md @@ -53,24 +53,17 @@ sudo apt update && sudo apt install -y openssl ca-certificates libssl3 libssl-de Install the Protobuf compiler: :::note -While many package repositories provide a `protobuf-compiler` package in lieu of manually installing protoc, we've found at the time of this writing that most of them use v3.21 which is quite out of date. We recommend getting the latest version manually. +While many package repositories provide a `protobuf-compiler` package in lieu of manually installing protoc, we've found at the time of this writing that Debian-based ones use v3.21 which is quite out of date. We recommend getting the latest version manually. ::: +We provide a convenient recipe to install the latest version directly from the GitHub releases page: + ```bash -PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') -MACHINE_ARCH=$(uname -m) -case "${MACHINE_ARCH}" in - aarch64) PROTOC_ARCH=aarch_64;; - x86_64) PROTOC_ARCH=x86_64;; - *) echo "${MACHINE_ARCH} is not supported."; exit 1;; -esac -curl -sLo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip -sudo unzip -q protoc.zip bin/protoc -d /usr -sudo unzip -q protoc.zip "include/google/*" -d /usr -sudo chmod a+x /usr/bin/protoc -rm -rf protoc.zip +just install-protoc ``` +This works on OSX and Linux systems, but you are welcome to download and install it manually as well. 
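If you'd like to sanity-check the result (a minimal sketch; the exact version reported will depend on whichever protobuf release is current when you run it), the freshly installed compiler can report itself:

```bash
# Install protoc via the justfile recipe, then confirm it is on the PATH
just install-protoc
protoc --version   # prints the installed libprotoc version
```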
+ With the prerequisites set up, pull the repository: ```bash git clone https://github.com/Commit-Boost/commit-boost-client diff --git a/justfile b/justfile index ac1314fc..ee5f4c2d 100644 --- a/justfile +++ b/justfile @@ -179,6 +179,9 @@ _docker-build-image-multiarch version crate local-docker-registry: _create-docke # === Utilities === # ================= +install-protoc: + provisioning/protoc.sh + docker-build-test-modules: docker build -t test_da_commit . -f examples/da_commit/Dockerfile docker build -t test_builder_log . -f examples/builder_log/Dockerfile diff --git a/provisioning/protoc.sh b/provisioning/protoc.sh index 7f66a656..a727a7c1 100755 --- a/provisioning/protoc.sh +++ b/provisioning/protoc.sh @@ -21,7 +21,10 @@ case "$(uname)" in Linux*) PROTOC_OS="linux" ; TARGET_DIR="/usr" ; # Assumes the script is run as root or the user can do it manually - apt update && apt install -y unzip curl ca-certificates jq ;; + if [ $(id -u) != "0" ]; then + CMD_PREFIX="sudo " ; + fi + ${CMD_PREFIX}apt update && ${CMD_PREFIX}apt install -y unzip curl ca-certificates jq ;; *) echo "Unsupported OS: $(uname)" ; exit 1 ;; @@ -50,8 +53,8 @@ echo "Installing protoc: $PROTOC_VERSION-$PROTOC_OS-$PROTOC_ARCH" # Download and install protoc curl --retry 10 --retry-delay 2 --retry-all-errors -fsLo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-$PROTOC_OS-$PROTOC_ARCH.zip || fail "Failed to download protoc" -unzip -q protoc.zip bin/protoc -d $TARGET_DIR || fail "Failed to unzip protoc" -unzip -q protoc.zip "include/google/*" -d $TARGET_DIR || fail "Failed to unzip protoc includes" -chmod a+x $TARGET_DIR/bin/protoc || fail "Failed to set executable permissions for protoc" +${CMD_PREFIX}unzip -qo protoc.zip bin/protoc -d $TARGET_DIR || fail "Failed to unzip protoc" +${CMD_PREFIX}unzip -qo protoc.zip "include/google/*" -d $TARGET_DIR || fail "Failed to unzip protoc includes" +${CMD_PREFIX}chmod a+x $TARGET_DIR/bin/protoc || fail "Failed to set executable permissions for protoc" rm -rf protoc.zip || fail "Failed to remove protoc zip file" echo "protoc ${PROTOC_VERSION} installed successfully for ${PROTOC_OS} ${PROTOC_ARCH}" \ No newline at end of file From d53728821c88045dd9f6f87c37a9ad076647d601 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 9 Jun 2025 13:21:12 -0400 Subject: [PATCH 32/35] Update crates/cli/src/docker_init.rs Co-authored-by: ltitanb <163874448+ltitanb@users.noreply.github.com> --- crates/cli/src/docker_init.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 652e3448..c6fcd533 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -73,11 +73,7 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut targets = Vec::new(); // address for signer API communication - let signer_port = if let Some(signer_config) = &cb_config.signer { - signer_config.port - } else { - DEFAULT_SIGNER_PORT - }; + let signer_port = cb_config.signer.as_ref().map(|s| s.port).unwrap_or(DEFAULT_SIGNER_PORT); let signer_server = if let Some(SignerConfig { inner: SignerType::Remote { url }, .. 
    }) = &cb_config.signer {
            url.to_string()

From 7afb7633fb3f75baacb88eb3d1600bd15c2e2cc6 Mon Sep 17 00:00:00 2001
From: Joe Clapis
Date: Mon, 9 Jun 2025 13:22:15 -0400
Subject: [PATCH 33/35] Added example signer config params

---
 config.example.toml | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/config.example.toml b/config.example.toml
index ae69c3ff..89d472c1 100644
--- a/config.example.toml
+++ b/config.example.toml
@@ -148,6 +148,13 @@ url = "http://0xa119589bb33ef52acbb8116832bec2b58fca590fe5c85eac5d3230b44d5bc09f
 # Docker image to use for the Signer module.
 # OPTIONAL, DEFAULT: ghcr.io/commit-boost/signer:latest
 # docker_image = "ghcr.io/commit-boost/signer:latest"
+# Host to bind the Signer API server to
+# OPTIONAL, DEFAULT: 127.0.0.1
+host = "127.0.0.1"
+# Port to listen for Signer API calls on
+# OPTIONAL, DEFAULT: 20000
+port = 20000
+
 # For Remote signer:
 # [signer.remote]
 # URL of the Web3Signer instance

From 09ac8217f686b378dda48a86fd2b78bad9493b92 Mon Sep 17 00:00:00 2001
From: Joe Clapis
Date: Mon, 9 Jun 2025 13:22:31 -0400
Subject: [PATCH 34/35] Cleaned up signer config loading from feedback

---
 crates/common/src/config/signer.rs | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs
index dce97666..5618f3ae 100644
--- a/crates/common/src/config/signer.rs
+++ b/crates/common/src/config/signer.rs
@@ -109,20 +109,17 @@ impl StartSignerConfig {

         let jwts = load_jwt_secrets()?;

-        // Load the server endpoint first from the env var, then the config, and finally
-        // the defaults
+        let signer_config = config.signer.ok_or_eyre("Signer config is missing")?;
+
+        // Load the server endpoint first from the env var if present, otherwise the
+        // config
         let endpoint = if let Some(endpoint) = load_optional_env_var(SIGNER_ENDPOINT_ENV) {
             endpoint.parse()?
         } else {
-            match config.signer {
-                Some(ref signer) => SocketAddr::from((signer.host, signer.port)),
-                None => SocketAddr::from((default_host(), DEFAULT_SIGNER_PORT)),
-            }
+            SocketAddr::from((signer_config.host, signer_config.port))
         };

-        let signer = config.signer.ok_or_eyre("Signer config is missing")?.inner;
-
-        match signer {
+        match signer_config.inner {
             SignerType::Local { loader, store, .. } => Ok(StartSignerConfig {
                 chain: config.chain,
                 loader: Some(loader),

From ccaf97dc48b94583cd90a20a4ac14ef3bf204d33 Mon Sep 17 00:00:00 2001
From: Joe Clapis
Date: Tue, 10 Jun 2025 14:30:13 -0400
Subject: [PATCH 35/35] Added JWT auth fields to the example config

---
 config.example.toml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/config.example.toml b/config.example.toml
index d32dfbf9..899c6a10 100644
--- a/config.example.toml
+++ b/config.example.toml
@@ -154,6 +154,12 @@ host = "127.0.0.1"
 # Port to listen for Signer API calls on
 # OPTIONAL, DEFAULT: 20000
 port = 20000
+# Number of JWT authentication attempts a client can fail before blocking that client temporarily from Signer access
+# OPTIONAL, DEFAULT: 3
+jwt_auth_fail_limit = 3
+# How long to block a client from Signer access, in seconds, if it failed JWT authentication too many times
+# OPTIONAL, DEFAULT: 300
+jwt_auth_fail_timeout_seconds = 300

 # For Remote signer:
 # [signer.remote]