diff --git a/.github/workflows/canton.yml b/.github/workflows/canton.yml new file mode 100644 index 000000000..c1ad99ffb --- /dev/null +++ b/.github/workflows/canton.yml @@ -0,0 +1,101 @@ +name: Canton Integration Tests + +on: + schedule: + - cron: "0 4 * * *" # Daily at 4:00 AM UTC (after nightly at 3:00 AM) + workflow_dispatch: # Allow manual triggers + push: + branches: + - develop + paths: + - integration-tests/src/canton.rs + - integration-tests/tests/cases/canton*.rs + - integration-tests/fixtures/canton/** + - chain-signatures/canton-types/** + - chain-signatures/node/src/indexer_canton/** + pull_request: + paths: + - integration-tests/src/canton.rs + - integration-tests/tests/cases/canton*.rs + - integration-tests/fixtures/canton/** + - chain-signatures/canton-types/** + - chain-signatures/node/src/indexer_canton/** + +jobs: + canton-tests: + name: Canton Stream Tests + runs-on: warp-ubuntu-latest-x64-4x + + steps: + - uses: actions/checkout@v4 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v1 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Pull Redis + run: docker pull redis:7.4.2 + + - name: Install Java 21 + uses: actions/setup-java@v4 + with: + distribution: temurin + java-version: '21' + + - name: Install dpm (Canton CLI) + run: | + curl -fsSL https://get.digitalasset.com/install/install.sh | sh + echo "$HOME/.dpm/bin" >> $GITHUB_PATH + export PATH="$HOME/.dpm/bin:$PATH" + dpm --version + + - name: Install stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + + - name: Install Rust (1.81.0) + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: 1.81.0 + target: wasm32-unknown-unknown + + - name: Install Node.js + uses: actions/setup-node@v4 + with: + node-version: '18' + + - uses: WarpBuilds/cache@v1 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + 
~/.cargo/git/db/ + target/ + key: "${{ runner.os }}-cargo-${{ hashFiles('chain-signatures/Cargo.lock') }}" + restore-keys: ${{ runner.os }}-cargo- + + - name: Build Chain-Signatures Contract + run: ./build-contract.sh + + - name: Build eth contract + working-directory: ./chain-signatures/contract-eth + run: npm i && npx hardhat compile + + - name: Build Chain-Signatures Node + run: cargo build -p mpc-node --release + + - name: Build integration tests + run: cargo build -p integration-tests --tests + + - name: Run Canton stream tests + run: cargo test -p integration-tests --test lib -- canton_stream --ignored --nocapture --test-threads 1 + env: + RUST_LOG: info,workspaces=warn + RUST_BACKTRACE: 1 diff --git a/.gitignore b/.gitignore index 16a50c0a2..3e9c0def1 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ tmp *.log +*.log.*.gz # Artifacts that may be left over **/*-secret-manager-* diff --git a/Cargo.lock b/Cargo.lock index 05c7d1193..c55a2bfca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2656,6 +2656,14 @@ dependencies = [ "serde_core", ] +[[package]] +name = "canton-types" +version = "1.11.0" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "caps" version = "0.5.5" @@ -2934,7 +2942,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ "lazy_static", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -3516,7 +3524,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 1.0.109", + "syn 2.0.101", ] [[package]] @@ -4026,6 +4034,7 @@ dependencies = [ "generic-array", "group", "hkdf", + "pem-rfc7468", "pkcs8", "rand_core 0.6.4", "sec1", @@ -4546,7 +4555,7 @@ dependencies = [ "hashers", "http 0.2.12", "instant", - "jsonwebtoken", + "jsonwebtoken 8.3.0", "once_cell", "pin-project", 
"reqwest 0.11.27", @@ -4554,7 +4563,7 @@ dependencies = [ "serde_json", "thiserror 1.0.69", "tokio", - "tokio-tungstenite", + "tokio-tungstenite 0.20.1", "tracing", "tracing-futures", "url 2.5.4", @@ -6028,7 +6037,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.9", "tokio", "tower-service", "tracing", @@ -6567,6 +6576,7 @@ dependencies = [ "borsh 1.5.7", "bs58 0.5.1", "cait-sith", + "canton-types", "ciborium", "clap", "criterion", @@ -6579,6 +6589,7 @@ dependencies = [ "generic-array", "hex", "hyper 0.14.32", + "jsonwebtoken 10.3.0", "k256", "mpc-contract", "mpc-crypto", @@ -7079,13 +7090,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ "base64 0.21.7", - "pem", + "pem 1.1.1", "ring 0.16.20", "serde", "serde_json", "simple_asn1", ] +[[package]] +name = "jsonwebtoken" +version = "10.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0529410abe238729a60b108898784df8984c87f6054c9c4fcacc47e4803c1ce1" +dependencies = [ + "base64 0.22.1", + "ed25519-dalek 2.1.1", + "getrandom 0.2.16", + "hmac 0.12.1", + "js-sys", + "p256", + "p384", + "pem 3.0.6", + "rand 0.8.5", + "rsa", + "serde", + "serde_json", + "sha2 0.10.9", + "signature 2.2.0", + "simple_asn1", +] + [[package]] name = "k256" version = "0.13.4" @@ -7971,6 +8005,7 @@ dependencies = [ "borsh 0.10.4", "borsh 1.5.7", "cait-sith", + "canton-types", "chrono", "ciborium", "clap", @@ -7987,6 +8022,7 @@ dependencies = [ "http 1.3.1", "hyper 0.14.32", "hyper-rustls 0.24.2", + "jsonwebtoken 10.3.0", "k256", "local-ip-address", "lru 0.13.0", @@ -8006,6 +8042,7 @@ dependencies = [ "opentelemetry-appender-tracing", "opentelemetry-otlp", "opentelemetry_sdk", + "p256", "parity-scale-codec", "prometheus 0.14.0", "rand 0.8.5", @@ -8033,6 +8070,7 @@ dependencies = [ "thiserror 1.0.69", "tokio", "tokio-stream", + "tokio-tungstenite 
0.29.0", "tracing", "tracing-opentelemetry", "tracing-stackdriver", @@ -9308,6 +9346,22 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-bigint-dig" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e661dda6640fad38e827a6d4a310ff4763082116fe217f279885c97f511bb0b7" +dependencies = [ + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand 0.8.5", + "smallvec", + "zeroize", +] + [[package]] name = "num-complex" version = "0.2.4" @@ -9445,7 +9499,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate 3.3.0", "proc-macro2", "quote", "syn 2.0.101", @@ -9778,6 +9832,18 @@ dependencies = [ "sha2 0.10.9", ] +[[package]] +name = "p384" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe42f1670a52a47d448f14b6a5c61dd78fce51856e68edaa38f7ae3a46b8d6b6" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2 0.10.9", +] + [[package]] name = "pairing" version = "0.23.0" @@ -9794,8 +9860,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" dependencies = [ "bitcoin_hashes 0.13.0", - "rand 0.7.3", - "rand_core 0.5.1", + "rand 0.8.5", + "rand_core 0.6.4", "serde", "unicode-normalization", ] @@ -9997,6 +10063,16 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" +dependencies = [ + "base64 0.22.1", + "serde_core", +] + [[package]] name = "pem-rfc7468" version = "0.7.0" @@ -10144,6 +10220,17 @@ dependencies = [ "futures-io", ] +[[package]] +name = "pkcs1" +version = "0.7.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der", + "pkcs8", + "spki", +] + [[package]] name = "pkcs8" version = "0.10.2" @@ -10880,7 +10967,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ - "pem", + "pem 1.1.1", "ring 0.16.20", "time", "yasna", @@ -11458,6 +11545,26 @@ dependencies = [ "serde", ] +[[package]] +name = "rsa" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8573f03f5883dcaebdfcf4725caa1ecb9c15b2ef50c43a07b816e06799bb12d" +dependencies = [ + "const-oid", + "digest 0.10.7", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core 0.6.4", + "signature 2.2.0", + "spki", + "subtle", + "zeroize", +] + [[package]] name = "rtnetlink" version = "0.13.1" @@ -14078,8 +14185,8 @@ dependencies = [ "thiserror 2.0.12", "tokio", "tokio-stream", - "tokio-tungstenite", - "tungstenite", + "tokio-tungstenite 0.20.1", + "tungstenite 0.20.1", "url 2.5.4", ] @@ -14628,7 +14735,7 @@ dependencies = [ "libc", "log", "nix 0.29.0", - "pem", + "pem 1.1.1", "percentage", "quinn", "quinn-proto 0.11.14", @@ -16716,10 +16823,24 @@ dependencies = [ "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", - "tungstenite", + "tungstenite 0.20.1", "webpki-roots 0.25.4", ] +[[package]] +name = "tokio-tungstenite" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f72a05e828585856dacd553fba484c242c46e391fb0e58917c942ee9202915c" +dependencies = [ + "futures-util", + "log", + "native-tls", + "tokio", + "tokio-native-tls", + "tungstenite 0.29.0", +] + [[package]] name = "tokio-util" version = "0.7.15" @@ -17096,6 +17217,23 @@ dependencies = [ "webpki-roots 0.24.0", ] +[[package]] +name = "tungstenite" +version = "0.29.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c01152af293afb9c7c2a57e4b559c5620b421f6d133261c60dd2d0cdb38e6b8" +dependencies = [ + "bytes", + "data-encoding", + "http 1.3.1", + "httparse", + "log", + "native-tls", + "rand 0.9.1", + "sha1", + "thiserror 2.0.12", +] + [[package]] name = "tuplex" version = "0.1.2" @@ -17108,9 +17246,9 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "digest 0.10.7", - "rand 0.7.3", + "rand 0.8.5", "static_assertions", ] @@ -17780,7 +17918,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 78e300dd5..50ec75135 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,6 @@ [workspace] members = [ + "chain-signatures/canton-types", "chain-signatures/crypto", "chain-signatures/contract", "chain-signatures/contract-sol", @@ -18,12 +19,14 @@ version = "1.11.0" alloy = { version = "=1.0.38", features = ["contract", "json"] } anyhow = { version = "1.0.95", features = ["backtrace"] } borsh = "1.5.3" +canton-types = { path = "chain-signatures/canton-types" } cait-sith = { git = "https://github.com/sig-net/cait-sith", rev = "9f34e8c", features = ["k256"] } ciborium = "0.2.2" clap = { version = "4.5.4", features = ["derive", "env"] } deadpool-redis = "0.18.0" hex = "0.4.3" hyper = { version = "0.14", features = ["full"] } +jsonwebtoken = { version = "10", features = ["rust_crypto"] } k256 = { version = "0.13.1", features = [ "sha256", "ecdsa", @@ -40,6 +43,7 @@ serde_json = "1" sha3 = "0.10.8" thiserror = "1" tokio = { version = "1.45.1", features = ["full"] } +tokio-tungstenite = { version = "0.29", features = 
["native-tls"] } tracing = "0.1.35" tracing-subscriber = { version = "0.3.20", default-features = false, features = [ "env-filter", @@ -63,6 +67,7 @@ alloy-json-abi = "1.4.1" alloy-rlp = "0.3.12" solana-client = "2.2.7" futures-util = "0.3.31" +p256 = { version = "0.13.2", features = ["ecdsa"] } parity-scale-codec = { version = "3", features = ["derive"] } near-account-id = "1.0.0" diff --git a/chain-signatures/canton-types/Cargo.toml b/chain-signatures/canton-types/Cargo.toml new file mode 100644 index 000000000..bf4a870c3 --- /dev/null +++ b/chain-signatures/canton-types/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "canton-types" +version.workspace = true +edition = "2021" +publish = false + +[dependencies] +serde.workspace = true +serde_json.workspace = true diff --git a/chain-signatures/canton-types/src/contracts.rs b/chain-signatures/canton-types/src/contracts.rs new file mode 100644 index 000000000..3b1cffc43 --- /dev/null +++ b/chain-signatures/canton-types/src/contracts.rs @@ -0,0 +1,133 @@ +//! Typed structs for Daml contract payloads. +//! +//! These represent the JSON payloads inside `CreatedEvent.payload` for specific +//! Daml templates from `daml-signer` and `daml-vault`. Derived from the `.daml` +//! source files in `canton-mpc-poc/daml-packages/`. +//! +//! All fields are raw JSON types (strings). Conversion to internal types +//! (e.g., hex → `[u8; 32]`, DER → `Signature`) is the consumer's responsibility. + +use serde::{Deserialize, Serialize}; + +// --------------------------------------------------------------------------- +// From daml-vault/daml/Erc20Vault.daml — EvmTransactionParams record +// --------------------------------------------------------------------------- + +/// EVM transaction parameters passed through the Vault contract. +/// All fields are hex-encoded strings (padded to 64 chars). 
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct EvmTransactionParams { + pub to: String, + pub function_signature: String, + #[serde(default)] + pub args: Vec, + pub value: String, + pub nonce: String, + pub gas_limit: String, + pub max_fee_per_gas: String, + /// Daml field name is `maxPriorityFee` (NOT `maxPriorityFeePerGas`). + pub max_priority_fee: String, + pub chain_id: String, +} + +// --------------------------------------------------------------------------- +// From daml-signer/daml/Signer.daml — SignBidirectionalEvent +// --------------------------------------------------------------------------- + +/// Payload of a `Signer:SignBidirectionalEvent` created event. +/// Emitted when a Vault exercises `RequestDeposit` → `Signer.SignBidirectional`. +#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SignBidirectionalRequestedEvent { + pub operators: Vec, + pub sender: String, + pub requester: String, + pub sig_network: String, + pub evm_tx_params: EvmTransactionParams, + pub caip2_id: String, + /// Canton sends this as either a number or a string. + #[serde(deserialize_with = "deserialize_u32_lenient")] + pub key_version: u32, + pub path: String, + pub algo: String, + pub dest: String, + #[serde(default)] + pub params: String, + pub nonce_cid_text: String, + #[serde(default)] + pub output_deserialization_schema: String, + #[serde(default)] + pub respond_serialization_schema: String, +} + + +// --------------------------------------------------------------------------- +// From daml-signer/daml/Signer.daml — SignatureRespondedEvent +// --------------------------------------------------------------------------- + +/// Raw payload of a `Signer:SignatureRespondedEvent` created event. 
+/// Fields are hex strings; conversion to `[u8; 32]` / `Signature` is the +/// consumer's responsibility (avoiding a dependency on `mpc-primitives`). +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SignatureRespondedEventPayload { + /// Hex-encoded 32-byte request ID. + pub request_id: String, + pub responder: String, + /// DER-encoded hex signature. + pub signature: String, +} + +// --------------------------------------------------------------------------- +// From daml-signer/daml/Signer.daml — RespondBidirectionalEvent +// --------------------------------------------------------------------------- + +/// Raw payload of a `Signer:RespondBidirectionalEvent` created event. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RespondBidirectionalEventPayload { + /// Hex-encoded 32-byte request ID. + pub request_id: String, + pub responder: String, + /// Hex-encoded serialized output from the destination chain. + pub serialized_output: String, + /// DER-encoded hex signature. + pub signature: String, +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/// Deserialize a u32 from either a JSON number or a JSON string. 
+fn deserialize_u32_lenient<'de, D>(deserializer: D) -> Result +where + D: serde::Deserializer<'de>, +{ + use serde::de; + + struct U32Visitor; + impl<'de> de::Visitor<'de> for U32Visitor { + type Value = u32; + fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.write_str("a u32 as number or string") + } + fn visit_u64(self, v: u64) -> Result { + u32::try_from(v).map_err(|_| E::custom(format!("u32 overflow: {v}"))) + } + fn visit_i64(self, v: i64) -> Result { + u32::try_from(v).map_err(|_| E::custom(format!("u32 overflow: {v}"))) + } + fn visit_f64(self, v: f64) -> Result { + if v.fract() != 0.0 || v < 0.0 || v > u32::MAX as f64 { + return Err(E::custom(format!("invalid u32 float: {v}"))); + } + Ok(v as u32) + } + fn visit_str(self, v: &str) -> Result { + v.parse().map_err(|_| E::custom(format!("invalid u32 string: {v}"))) + } + } + deserializer.deserialize_any(U32Visitor) +} diff --git a/chain-signatures/canton-types/src/ledger_api.rs b/chain-signatures/canton-types/src/ledger_api.rs new file mode 100644 index 000000000..c08eb4dd9 --- /dev/null +++ b/chain-signatures/canton-types/src/ledger_api.rs @@ -0,0 +1,416 @@ +//! Typed structs for the Canton JSON Ledger API v2. +//! +//! Derived from the OpenAPI spec (version 3.4.11). Only types that the MPC node +//! or integration tests actually use are included. + +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +// --------------------------------------------------------------------------- +// Commands (request construction) +// --------------------------------------------------------------------------- + +/// Wrapper for `POST /v2/commands/submit-and-wait-for-transaction`. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SubmitAndWaitForTransactionRequest { + pub commands: JsCommands, +} + +/// The commands payload sent to the ledger. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsCommands { + pub command_id: String, + pub user_id: String, + pub act_as: Vec, + #[serde(default)] + pub read_as: Vec, + pub commands: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub disclosed_contracts: Vec, +} + +/// A single ledger command (externally tagged enum). +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum Command { + CreateCommand { + #[serde(rename = "templateId")] + template_id: String, + #[serde(rename = "createArguments")] + create_arguments: Value, + }, + ExerciseCommand { + #[serde(rename = "templateId")] + template_id: String, + #[serde(rename = "contractId")] + contract_id: String, + choice: String, + #[serde(rename = "choiceArgument")] + choice_argument: Value, + }, +} + +/// A disclosed contract for cross-party visibility. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct DisclosedContract { + pub template_id: String, + pub contract_id: String, + pub created_event_blob: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub synchronizer_id: Option, +} + +// --------------------------------------------------------------------------- +// Events (response parsing) +// --------------------------------------------------------------------------- + +/// Response from `POST /v2/commands/submit-and-wait-for-transaction`. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SubmitAndWaitForTransactionResponse { + pub transaction: Transaction, +} + +/// A ledger transaction containing events. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Transaction { + pub offset: Value, + #[serde(default)] + pub events: Vec, +} + +/// A ledger event (externally tagged enum). 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum Event { + CreatedEvent(CreatedEvent), + ArchivedEvent(ArchivedEvent), + ExercisedEvent(ExercisedEvent), +} + +/// A contract creation event. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CreatedEvent { + pub contract_id: String, + pub template_id: String, + /// The contract payload. Use domain types from [`crate::contracts`] to + /// deserialize into typed structs via `serde_json::from_value()`. + #[serde(alias = "createArgument")] + pub payload: Value, + #[serde(default)] + pub created_event_blob: Option, + /// Parties that are signatories on this contract. + #[serde(default)] + pub signatories: Vec, + /// Parties whose participants witnessed (confirmed) this transaction. + #[serde(default)] + pub witness_parties: Vec, + /// Position of this event in the transaction tree (LEDGER_EFFECTS only). + #[serde(default)] + pub node_id: Option, + /// The package name of the contract. + #[serde(default)] + pub package_name: Option, +} + +/// A contract archive event. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchivedEvent { + pub contract_id: String, + pub template_id: String, + #[serde(default)] + pub package_name: Option, +} + +/// A choice exercise event (LEDGER_EFFECTS shape). +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ExercisedEvent { + pub contract_id: String, + pub template_id: String, + pub choice: String, + #[serde(default)] + pub acting_parties: Vec, + #[serde(default)] + pub consuming: bool, + #[serde(default)] + pub node_id: Option, + /// Upper boundary of descendant node IDs in this transaction. 
+ #[serde(default)] + pub last_descendant_node_id: Option, + #[serde(default)] + pub package_name: Option, +} + +// --------------------------------------------------------------------------- +// WebSocket subscription +// --------------------------------------------------------------------------- + +/// Subscription message sent to `ws://.../v2/updates`. +/// +/// Uses `updateFormat` (Canton 3.4+) instead of the deprecated +/// `filter`/`verbose` top-level fields +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetUpdatesRequest { + pub begin_exclusive: u64, + pub update_format: UpdateFormat, +} + +/// Specifies what updates to include and how to render them. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UpdateFormat { + pub include_transactions: TransactionFormat, +} + +/// Specifies the transaction shape and event format. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionFormat { + pub transaction_shape: String, + pub event_format: EventFormatInline, +} + +/// Inline event format for the subscription message. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct EventFormatInline { + #[serde(default)] + pub filters_by_party: serde_json::Map, + #[serde(default)] + pub verbose: bool, +} + +/// A message received from the updates WebSocket stream. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UpdateMessage { + #[serde(default)] + pub update: Option, + #[serde(default)] + pub error: Option, +} + +/// Discriminated update types from the WebSocket stream. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum Update { + Transaction { + value: TransactionUpdate, + }, + OffsetCheckpoint { + value: OffsetCheckpointValue, + }, +} + +/// The value inside an Update::Transaction. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionUpdate { + pub offset: u64, + #[serde(default)] + pub events: Vec, +} + +/// The value inside an Update::OffsetCheckpoint. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct OffsetCheckpointValue { + pub offset: u64, +} + +// --------------------------------------------------------------------------- +// Parties +// --------------------------------------------------------------------------- + +/// Request body for `POST /v2/parties`. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct AllocatePartyRequest { + pub party_id_hint: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub identity_provider_id: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub synchronizer_id: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub user_id: Option, +} + +/// Response from `POST /v2/parties`. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct AllocatePartyResponse { + pub party_details: PartyDetails, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct PartyDetails { + pub party: String, + #[serde(default)] + pub is_local: bool, +} + +// --------------------------------------------------------------------------- +// Users +// --------------------------------------------------------------------------- + +/// Request body for `POST /v2/users`. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CreateUserRequest { + pub user: UserInfo, + #[serde(default)] + pub rights: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UserInfo { + pub id: String, + pub primary_party: String, + #[serde(default)] + pub is_deactivated: bool, + #[serde(default)] + pub identity_provider_id: String, +} + +/// A user right (externally tagged enum with nested kind wrapper). +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UserRight { + pub kind: UserRightKind, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum UserRightKind { + CanActAs { value: PartyValue }, + CanReadAs { value: PartyValue }, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PartyValue { + pub party: String, +} + +// --------------------------------------------------------------------------- +// Active Contracts +// --------------------------------------------------------------------------- + +/// Request body for `POST /v2/state/active-contracts`. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetActiveContractsRequest { + pub active_at_offset: u64, + pub event_format: EventFormat, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct EventFormat { + pub filters_by_party: serde_json::Map, + #[serde(default)] + pub verbose: bool, +} + +/// A single item in the active contracts response array. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ActiveContractEntry { + #[serde(default)] + pub contract_entry: Option, +} + +/// Wraps the active contract variant. +/// Canton API can return JsEmpty, JsIncompleteAssigned, JsIncompleteUnassigned +/// in addition to JsActiveContract. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum ContractEntry { + JsActiveContract(JsActiveContract), + JsEmpty(Value), + JsIncompleteAssigned(Value), + JsIncompleteUnassigned(Value), +} + +/// An active contract with its created event. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsActiveContract { + pub created_event: CreatedEvent, + #[serde(default)] + pub synchronizer_id: Option, +} + +// --------------------------------------------------------------------------- +// Ledger End +// --------------------------------------------------------------------------- + +/// Response from `GET /v2/state/ledger-end`. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct LedgerEndResponse { + pub offset: u64, +} + +// --------------------------------------------------------------------------- +// Errors +// --------------------------------------------------------------------------- + +/// Canton error response body. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CantonError { + #[serde(default)] + pub code: String, + #[serde(default)] + pub cause: String, +} + +// --------------------------------------------------------------------------- +// Template constants and matching +// --------------------------------------------------------------------------- + +/// Known Daml template suffixes from daml-signer. +pub mod templates { + pub const SIGNER: &str = "Signer:Signer"; + pub const SIGN_BIDIRECTIONAL_EVENT: &str = "Signer:SignBidirectionalEvent"; + pub const SIGNATURE_RESPONDED_EVENT: &str = "Signer:SignatureRespondedEvent"; + pub const RESPOND_BIDIRECTIONAL_EVENT: &str = "Signer:RespondBidirectionalEvent"; + pub const SIGNING_NONCE: &str = "Signer:SigningNonce"; +} + +/// Check if a template ID matches a given suffix at a module boundary. +/// Requires the suffix to be preceded by `:` (package separator) or match exactly. 
+pub fn template_suffix_matches(template_id: &str, suffix: &str) -> bool { + template_id == suffix + || template_id.ends_with(&format!(":{suffix}")) +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/// Build a UserRight for CanActAs. +pub fn can_act_as(party: &str) -> UserRight { + UserRight { + kind: UserRightKind::CanActAs { + value: PartyValue { + party: party.to_string(), + }, + }, + } +} + +/// Build a UserRight for CanReadAs. +pub fn can_read_as(party: &str) -> UserRight { + UserRight { + kind: UserRightKind::CanReadAs { + value: PartyValue { + party: party.to_string(), + }, + }, + } +} diff --git a/chain-signatures/canton-types/src/lib.rs b/chain-signatures/canton-types/src/lib.rs new file mode 100644 index 000000000..13d25a66e --- /dev/null +++ b/chain-signatures/canton-types/src/lib.rs @@ -0,0 +1,2 @@ +pub mod contracts; +pub mod ledger_api; diff --git a/chain-signatures/crypto/src/kdf.rs b/chain-signatures/crypto/src/kdf.rs index 9f8269258..99f990131 100644 --- a/chain-signatures/crypto/src/kdf.rs +++ b/chain-signatures/crypto/src/kdf.rs @@ -68,6 +68,11 @@ pub fn derive_epsilon_hydration(key_version: u32, sender: &str, path: &str) -> S keccak(derivation_path.as_bytes()) } +pub fn derive_epsilon_canton(key_version: u32, sender: &str, path: &str) -> Scalar { + let derivation_path = derivation_path(key_version, Chain::Canton, sender, path); + keccak(derivation_path.as_bytes()) +} + pub fn derive_key(public_key: PublicKey, epsilon: Scalar) -> PublicKey { (::ProjectivePoint::GENERATOR * epsilon + public_key).to_affine() } @@ -181,6 +186,15 @@ mod tests { derivation_path(1, Chain::Bitcoin, "sender", "path"), "sig.network v2.0.0 epsilon derivation:bip122:000000000019d6689c085ae165831e93:sender:path" ); + + assert_eq!( + derivation_path(0, Chain::Canton, "sender", "path"), + "sig.network v1.0.0 epsilon 
derivation,canton:global,sender,path" + ); + assert_eq!( + derivation_path(1, Chain::Canton, "sender", "path"), + "sig.network v2.0.0 epsilon derivation:canton:global:sender:path" + ); } #[test] @@ -302,6 +316,26 @@ mod tests { assert_eq!(derived_secret_key.to_bytes().as_slice(), &expected_bytes); } + #[test] + fn test_derive_epsilon_canton_stays_the_same() { + let expected_canton_v0 = Scalar::from_bytes([ + 0xA4, 0xCF, 0xD1, 0x98, 0x07, 0xD1, 0x96, 0x8D, 0xAA, 0xDA, 0x88, 0xB5, 0xB8, 0x12, + 0xAD, 0x61, 0xC6, 0x24, 0x08, 0xB4, 0x84, 0xB5, 0x51, 0xFC, 0x37, 0x30, 0x34, 0x51, + 0x03, 0x14, 0x61, 0x4C, + ]) + .unwrap(); + + let expected_canton_v1 = Scalar::from_bytes([ + 0x49, 0x05, 0x93, 0xA1, 0x00, 0xEA, 0xE1, 0x26, 0x98, 0x8F, 0x3B, 0xA4, 0xEC, 0x3A, + 0xBD, 0x75, 0x4C, 0xD2, 0x4C, 0xD9, 0xA6, 0x6B, 0x14, 0x71, 0x27, 0x6A, 0x1B, 0xC3, + 0xE3, 0x10, 0xCA, 0xBD, + ]) + .unwrap(); + + assert_eq!(derive_epsilon_canton(0, "sender", "path"), expected_canton_v0); + assert_eq!(derive_epsilon_canton(1, "sender", "path"), expected_canton_v1); + } + // This logic is used to determine MPC PK (address) that is set as admin in Ethereum contract #[test] fn derive_ethereum_admin_key() { diff --git a/chain-signatures/crypto/src/lib.rs b/chain-signatures/crypto/src/lib.rs index 5bcd4e6bd..24dff05a5 100644 --- a/chain-signatures/crypto/src/lib.rs +++ b/chain-signatures/crypto/src/lib.rs @@ -3,8 +3,8 @@ pub mod kdf; use k256::elliptic_curve::sec1::FromEncodedPoint; use k256::EncodedPoint; pub use kdf::{ - check_ec_signature, derive_epsilon_eth, derive_epsilon_near, derive_epsilon_sol, derive_key, - x_coordinate, + check_ec_signature, derive_epsilon_canton, derive_epsilon_eth, derive_epsilon_hydration, + derive_epsilon_near, derive_epsilon_sol, derive_key, x_coordinate, }; pub use mpc_primitives::{PublicKey, ScalarExt}; diff --git a/chain-signatures/node/Cargo.toml b/chain-signatures/node/Cargo.toml index 492378475..1c8fbdbee 100644 --- a/chain-signatures/node/Cargo.toml +++ 
b/chain-signatures/node/Cargo.toml @@ -78,7 +78,10 @@ solana-sdk.workspace = true solana-transaction-status.workspace = true solana-client.workspace = true futures-util.workspace = true +jsonwebtoken.workspace = true +p256.workspace = true parity-scale-codec.workspace = true +tokio-tungstenite.workspace = true near-account-id.workspace = true near-crypto = "0.26.0" @@ -86,6 +89,7 @@ near-fetch = "0.6.0" near-primitives.workspace = true near-sdk.workspace = true +canton-types.workspace = true mpc-contract.workspace = true mpc-crypto.workspace = true mpc-keys.workspace = true diff --git a/chain-signatures/node/src/backlog/mod.rs b/chain-signatures/node/src/backlog/mod.rs index 0f400fb53..738bc8638 100644 --- a/chain-signatures/node/src/backlog/mod.rs +++ b/chain-signatures/node/src/backlog/mod.rs @@ -824,7 +824,7 @@ mod tests { use super::*; use crate::{ protocol::SignKind, - sign_bidirectional::{BidirectionalTx, BidirectionalTxId, SignStatus}, + sign_bidirectional::{BidirectionalTx, BidirectionalTxId, ChainContext, SignStatus}, stream::ops::SignBidirectionalEvent, }; use alloy::primitives::{Address, B256}; @@ -851,6 +851,7 @@ mod tests { from_address: Address::ZERO, nonce: 0, status, + chain_ctx: ChainContext::None, } } diff --git a/chain-signatures/node/src/cli.rs b/chain-signatures/node/src/cli.rs index 5db1f40c9..7312f8c06 100644 --- a/chain-signatures/node/src/cli.rs +++ b/chain-signatures/node/src/cli.rs @@ -14,7 +14,9 @@ use crate::rpc::{ContractStateWatcher, NearClient, RpcExecutor}; use crate::storage::checkpoint_storage::CheckpointStorage; use crate::storage::triple_storage::TriplePair; use crate::stream::run_stream; -use crate::{indexer, indexer_eth, indexer_hydration, indexer_sol, logs, mesh, storage, web}; +use crate::{ + indexer, indexer_canton, indexer_eth, indexer_hydration, indexer_sol, logs, mesh, storage, web, +}; use clap::Parser; use deadpool_redis::Runtime; @@ -69,6 +71,9 @@ pub enum Cli { /// Hydration Indexer options #[clap(flatten)] 
hydration: indexer_hydration::HydrationArgs, + /// Canton Indexer options + #[clap(flatten)] + canton: indexer_canton::CantonArgs, /// NEAR requests options #[clap(flatten)] indexer_options: indexer::Options, @@ -112,6 +117,7 @@ impl Cli { eth, sol, hydration, + canton, indexer_options, my_address, storage_options, @@ -159,6 +165,7 @@ impl Cli { args.extend(eth.into_str_args()); args.extend(sol.into_str_args()); args.extend(hydration.into_str_args()); + args.extend(canton.into_str_args()); args.extend(indexer_options.into_str_args()); args.extend(storage_options.into_str_args()); args.extend(log_options.into_str_args()); @@ -183,6 +190,7 @@ pub async fn run(cmd: Cli) -> anyhow::Result<()> { eth, sol, hydration, + canton, indexer_options, my_address, storage_options, @@ -292,12 +300,14 @@ pub async fn run(cmd: Cli) -> anyhow::Result<()> { let eth = eth.into_config(); let sol = sol.into_config(); let hydration = hydration.into_config(); + let canton = canton.into_config(); let network = NetworkConfig { cipher_sk, sign_sk }; let near_client = NearClient::new(&near_rpc, &my_address, &network, &mpc_contract_id, signer); let (rpc_channel, rpc) = - RpcExecutor::new(&near_client, ð, &sol, &hydration, backlog.clone()).await; + RpcExecutor::new(&near_client, ð, &sol, &hydration, &canton, backlog.clone()) + .await; let (sync_channel, sync) = SyncTask::new( &client, @@ -325,6 +335,7 @@ pub async fn run(cmd: Cli) -> anyhow::Result<()> { sol_signer_address = %sol_payer_address.as_deref().unwrap_or("None"), hydration_rpc_url = %hydration.as_ref().map(|h| h.rpc_ws_url.as_str()).unwrap_or("None"), hydration_signer_address = %hydration_signer_address.as_deref().unwrap_or("None"), + canton_json_api_url = %canton.as_ref().map(|c| c.json_api_url.as_str()).unwrap_or("None"), "starting node", ); @@ -414,12 +425,22 @@ pub async fn run(cmd: Cli) -> anyhow::Result<()> { } tokio::spawn(indexer_hydration::run( hydration, - sign_tx, - backlog, - contract_watcher, - mesh_state, - client, + 
sign_tx.clone(), + backlog.clone(), + contract_watcher.clone(), + mesh_state.clone(), + client.clone(), )); + if let Some(canton_stream) = indexer_canton::CantonStream::new(canton, backlog.clone()) { + tokio::spawn(run_stream( + canton_stream, + sign_tx.clone(), + backlog.clone(), + contract_watcher.clone(), + mesh_state.clone(), + client.clone(), + )); + } tracing::info!("protocol http server spawned"); protocol_handle.await?; web_handle.await?; diff --git a/chain-signatures/node/src/indexer_canton/mod.rs b/chain-signatures/node/src/indexer_canton/mod.rs new file mode 100644 index 000000000..35ecae142 --- /dev/null +++ b/chain-signatures/node/src/indexer_canton/mod.rs @@ -0,0 +1,972 @@ +use crate::backlog::Backlog; +use mpc_primitives::MAX_SECP256K1_SCALAR; +use crate::protocol::Chain; +use crate::sign_bidirectional::hash_rlp_data; +use crate::stream::ops::{ + RespondBidirectionalEvent, SignBidirectionalEvent, SignatureEvent, SignatureRespondedEvent, +}; +use crate::stream::{ChainEvent, ChainStream}; + +use alloy::consensus::TxEip1559; +use alloy::primitives::{keccak256, Address, B256, Bytes, TxKind, U256}; +use canton_types::{contracts, ledger_api}; +use futures_util::{SinkExt, StreamExt}; +use std::collections::HashSet; +use jsonwebtoken::{encode, Algorithm, EncodingKey, Header}; +use k256::Scalar; +use mpc_primitives::{ScalarExt, SignArgs, SignId, Signature, LATEST_MPC_KEY_VERSION}; +use std::fmt; +use tokio::sync::mpsc; +use tokio::task::JoinHandle; +use tokio_tungstenite::tungstenite::client::IntoClientRequest; +use tokio_tungstenite::tungstenite::http::header; +use tokio_tungstenite::tungstenite::Message; + +// --------------------------------------------------------------------------- +// Canton event structs +// --------------------------------------------------------------------------- + +pub use canton_types::contracts::EvmTransactionParams as CantonEvmTransactionParams; +pub use canton_types::contracts::SignBidirectionalRequestedEvent as 
CantonSignBidirectionalRequestedEvent;
+
+#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
+pub struct CantonRespondBidirectionalEvent {
+    pub request_id: [u8; 32],
+    pub responder: String,
+    pub serialized_output: Vec<u8>,
+    pub signature: Signature,
+}
+// NOTE: No Hash derive — Signature contains k256 types that don't impl Hash
+
+#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
+pub struct CantonSignatureRespondedEvent {
+    pub request_id: [u8; 32],
+    pub responder: String,
+    pub signature: Signature,
+}
+// NOTE: No Hash, PartialEq, Eq derives — matches HydrationSignatureRespondedEvent
+
+// ---------------------------------------------------------------------------
+// JWT token generation (ES256)
+// ---------------------------------------------------------------------------
+
+#[derive(serde::Serialize)]
+struct JwtClaims {
+    sub: String,
+    /// Canton supports scope-based OR audience-based tokens, not both.
+    /// We use scope-based (the default when no target-audience is configured).
+    scope: String,
+    iat: u64,
+    exp: u64,
+}
+
+/// Generate a JWT using a pre-parsed EncodingKey.
+pub(crate) fn generate_jwt_with_key(key: &EncodingKey, subject: &str) -> anyhow::Result<String> {
+    let now = std::time::SystemTime::now()
+        .duration_since(std::time::UNIX_EPOCH)?
+        .as_secs();
+    let claims = JwtClaims {
+        sub: subject.to_string(),
+        scope: "daml_ledger_api".to_string(),
+        iat: now,
+        exp: now + 300,
+    };
+    let header = Header::new(Algorithm::ES256);
+    Ok(encode(&header, &claims, key)?)
+}
+
+// ---------------------------------------------------------------------------
+// Flat keccak256 request ID computation (matches Daml/TS reference)
+// ---------------------------------------------------------------------------
+
+/// keccak256(utf8(text)), or keccak256("") for empty string.
+/// Mirrors Daml's `hashText` in Eip712.daml.
+fn hash_text(text: &str) -> [u8; 32] { + keccak256(text.as_bytes()).into() +} + +/// Left-pad a hex string to 32 bytes (big-endian U256). +fn pad_left_32(hex_str: &str) -> [u8; 32] { + let stripped = hex_str.strip_prefix("0x").unwrap_or(hex_str); + U256::from_str_radix(stripped, 16) + .unwrap_or(U256::ZERO) + .to_be_bytes::<32>() +} + +/// keccak256(concat(map keccak256 items)), or keccak256("") for empty list. +/// Mirrors Daml's `hashBytesList` in Eip712.daml. +fn hash_bytes_list(items: &[String]) -> [u8; 32] { + if items.is_empty() { + return keccak256(b"").into(); + } + let mut concatenated = Vec::new(); + for item in items { + let bytes = hex::decode(item).unwrap_or_default(); + let h: [u8; 32] = keccak256(&bytes).into(); + concatenated.extend_from_slice(&h); + } + keccak256(&concatenated).into() +} + +/// Hash EvmTransactionParams — mirrors Daml's `hashEvmParams` in RequestId.daml. +fn hash_evm_params(p: &CantonEvmTransactionParams) -> [u8; 32] { + let mut buf = Vec::with_capacity(9 * 32); + buf.extend_from_slice(&pad_left_32(&p.to)); + buf.extend_from_slice(&hash_text(&p.function_signature)); + buf.extend_from_slice(&hash_bytes_list(&p.args)); + buf.extend_from_slice(&pad_left_32(&p.value)); + buf.extend_from_slice(&pad_left_32(&p.nonce)); + buf.extend_from_slice(&pad_left_32(&p.gas_limit)); + buf.extend_from_slice(&pad_left_32(&p.max_fee_per_gas)); + buf.extend_from_slice(&pad_left_32(&p.max_priority_fee)); + buf.extend_from_slice(&pad_left_32(&p.chain_id)); + keccak256(&buf).into() +} + +/// Compute the request ID using flat keccak256(concat(hashed fields)). +/// Mirrors Daml's `computeRequestId` in RequestId.daml. 
+fn compute_request_id(event: &CantonSignBidirectionalRequestedEvent) -> [u8; 32] {
+    let key_version_hex = format!("{:x}", event.key_version);
+
+    let mut buf = Vec::with_capacity(9 * 32);
+    buf.extend_from_slice(&hash_text(&event.sender));
+    buf.extend_from_slice(&hash_evm_params(&event.evm_tx_params));
+    buf.extend_from_slice(&hash_text(&event.caip2_id));
+    buf.extend_from_slice(&pad_left_32(&key_version_hex));
+    buf.extend_from_slice(&hash_text(&event.path));
+    buf.extend_from_slice(&hash_text(&event.algo));
+    buf.extend_from_slice(&hash_text(&event.dest));
+    buf.extend_from_slice(&hash_text(&event.params));
+    buf.extend_from_slice(&hash_text(&event.nonce_cid_text));
+    keccak256(&buf).into()
+}
+
+// ---------------------------------------------------------------------------
+// RLP encoding of unsigned EIP-1559 transaction
+// ---------------------------------------------------------------------------
+
+/// Build calldata from function signature and args.
+/// calldata = keccak256(sig)[0..4] ++ concat(args)
+fn build_calldata(function_signature: &str, args: &[String]) -> Vec<u8> {
+    // EVM selector = first 4 bytes of keccak256 of the bare signature,
+    // e.g. "transfer(address,uint256)", NOT prefixed with "function ".
+    let selector: [u8; 4] = keccak256(function_signature.as_bytes()).0[..4]
+        .try_into()
+        .unwrap();
+    let mut calldata = selector.to_vec();
+    for arg in args {
+        calldata.extend_from_slice(&hex::decode(arg).unwrap_or_default());
+    }
+    calldata
+}
+
+/// Convert Canton EvmTransactionParams to an alloy TxEip1559.
+pub fn to_tx_eip1559(p: &CantonEvmTransactionParams) -> anyhow::Result<TxEip1559> {
+    let to_bytes = hex::decode(&p.to)?;
+    // Canton pads to 64 hex chars (32 bytes) — take last 20 for the address
+    let addr_bytes = if to_bytes.len() > 20 {
+        &to_bytes[to_bytes.len() - 20..]
+    } else {
+        &to_bytes
+    };
+
+    Ok(TxEip1559 {
+        chain_id: u64::from_str_radix(&p.chain_id, 16).unwrap_or(0),
+        nonce: u64::from_str_radix(&p.nonce, 16).unwrap_or(0),
+        gas_limit: u64::from_str_radix(&p.gas_limit, 16).unwrap_or(0),
+        max_fee_per_gas: u128::from_str_radix(&p.max_fee_per_gas, 16).unwrap_or(0),
+        max_priority_fee_per_gas: u128::from_str_radix(&p.max_priority_fee, 16).unwrap_or(0),
+        to: TxKind::Call(Address::from_slice(addr_bytes)),
+        value: U256::from_str_radix(&p.value, 16).unwrap_or(U256::ZERO),
+        input: Bytes::from(build_calldata(&p.function_signature, &p.args)),
+        access_list: Default::default(),
+    })
+}
+
+/// RLP-encode an unsigned EIP-1559 transaction using alloy.
+pub fn rlp_encode_unsigned_eip1559(params: &CantonEvmTransactionParams) -> Vec<u8> {
+    match to_tx_eip1559(params) {
+        Ok(tx) => {
+            use alloy::consensus::transaction::SignableTransaction;
+            let mut out = Vec::new();
+            tx.encode_for_signing(&mut out);
+            out
+        }
+        Err(e) => {
+            tracing::warn!(%e, "failed to build TxEip1559 from Canton params");
+            vec![]
+        }
+    }
+}
+
+// ---------------------------------------------------------------------------
+// DER signature encoding
+// ---------------------------------------------------------------------------
+
+/// DER-encode an ECDSA signature from an MPC Signature (big_r, s).
+///
+/// Canton's native Daml signature verification (`secp256k1WithEcdsaOnly`)
+/// only accepts DER-encoded signatures — there is no built-in Daml function
+/// to convert from raw `(r, s)` components to DER. We encode on the MPC
+/// side so the Daml contracts can verify directly without conversion.
+pub fn der_encode_signature(signature: &Signature) -> anyhow::Result<Vec<u8>> {
+    use mpc_crypto::x_coordinate;
+
+    let r_scalar = x_coordinate(&signature.big_r);
+    let ecdsa_sig = k256::ecdsa::Signature::from_scalars(r_scalar, &signature.s)
+        .map_err(|e| anyhow::anyhow!("failed to create ECDSA signature from (r, s) scalars: {e}"))?;
+    Ok(ecdsa_sig.to_der().to_bytes().to_vec())
+}
+
+// ---------------------------------------------------------------------------
+// SignatureEvent impl for Canton sign bidirectional
+// ---------------------------------------------------------------------------
+
+impl SignatureEvent for CantonSignBidirectionalRequestedEvent {
+    fn generate_request_id(&self) -> [u8; 32] {
+        compute_request_id(self)
+    }
+
+    fn generate_sign_request(&self, entropy: [u8; 32]) -> anyhow::Result<crate::protocol::IndexedSignRequest> {
+        tracing::info!("found canton event: {:?}", self);
+
+        if self.key_version > LATEST_MPC_KEY_VERSION {
+            tracing::warn!("unsupported key version: {}", self.key_version);
+            anyhow::bail!("unsupported key version");
+        }
+
+        let request_id = self.generate_request_id();
+
+        let epsilon = mpc_crypto::kdf::derive_epsilon_canton(
+            self.key_version,
+            &self.sender,
+            &self.path,
+        );
+
+        let rlp_encoded_tx = rlp_encode_unsigned_eip1559(&self.evm_tx_params);
+        let unsigned_tx_hash = hash_rlp_data(rlp_encoded_tx);
+
+        let Some(payload) = Scalar::from_bytes(unsigned_tx_hash) else {
+            anyhow::bail!(
+                "failed to convert unsigned_tx_hash to scalar: {unsigned_tx_hash:?}"
+            );
+        };
+
+        if payload > *MAX_SECP256K1_SCALAR {
+            tracing::warn!("payload exceeds secp256k1 curve order: {payload:?}");
+            anyhow::bail!("payload exceeds secp256k1 curve order");
+        }
+
+        let sign_id = SignId::new(request_id);
+        tracing::info!(?sign_id, "canton signature requested");
+
+        Ok(crate::protocol::IndexedSignRequest::sign_bidirectional(
+            sign_id,
+            SignArgs {
+                entropy,
+                epsilon,
+                payload,
+                path: self.path.clone(),
+                key_version: self.key_version,
+            },
+            Chain::Canton,
+            
crate::util::current_unix_timestamp(),
+            SignBidirectionalEvent::Canton(self.clone()),
+        ))
+    }
+
+    fn source_chain(&self) -> Chain {
+        Chain::Canton
+    }
+
+    fn sender_string(&self) -> String {
+        self.sender.clone()
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Configuration & CLI args
+// ---------------------------------------------------------------------------
+
+/// Canton JSON Ledger API configuration.
+#[derive(Clone)]
+pub struct CantonConfig {
+    pub json_api_url: String,
+    pub json_api_ws_url: String,
+    pub jwt_private_key_path: String,
+    pub jwt_subject: String,
+    pub party_id: String,
+    /// The Signer contract ID on the Canton ledger. Must be updated if the contract is re-deployed.
+    pub signer_contract_id: String,
+    /// The full template ID of the Signer contract (e.g. "<package-id>:Signer:Signer").
+    /// Must be updated if the DAR is upgraded.
+    pub signer_template_id: String,
+}
+
+impl fmt::Debug for CantonConfig {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("CantonConfig")
+            .field("json_api_url", &self.json_api_url)
+            .field("json_api_ws_url", &self.json_api_ws_url)
+            .field("jwt_private_key_path", &"<redacted>")
+            .field("jwt_subject", &self.jwt_subject)
+            .field("party_id", &self.party_id)
+            .field("signer_contract_id", &self.signer_contract_id)
+            .field("signer_template_id", &self.signer_template_id)
+            .finish()
+    }
+}
+
+/// CLI arguments for the Canton indexer.
+#[derive(Debug, Clone, clap::Parser)]
+#[group(id = "indexer_canton_options")]
+pub struct CantonArgs {
+    #[arg(
+        long,
+        env("MPC_CANTON_JSON_API_URL"),
+        requires_all = [
+            "canton_json_api_ws_url",
+            "canton_jwt_private_key_path",
+            "canton_jwt_subject",
+            "canton_party_id",
+            "canton_signer_contract_id",
+            "canton_signer_template_id",
+        ]
+    )]
+    pub canton_json_api_url: Option<String>,
+    #[arg(long, env("MPC_CANTON_JSON_API_WS_URL"), requires = "canton_json_api_url")]
+    pub canton_json_api_ws_url: Option<String>,
+    #[arg(long, env("MPC_CANTON_JWT_PRIVATE_KEY_PATH"), requires = "canton_json_api_url")]
+    pub canton_jwt_private_key_path: Option<String>,
+    #[arg(long, env("MPC_CANTON_JWT_SUBJECT"), requires = "canton_json_api_url")]
+    pub canton_jwt_subject: Option<String>,
+    #[arg(long, env("MPC_CANTON_PARTY_ID"), requires = "canton_json_api_url")]
+    pub canton_party_id: Option<String>,
+    /// The Signer contract ID on the Canton ledger. Must be updated if the contract is re-deployed.
+    #[arg(long, env("MPC_CANTON_SIGNER_CONTRACT_ID"), requires = "canton_json_api_url")]
+    pub canton_signer_contract_id: Option<String>,
+    /// The full template ID of the Signer contract (e.g. "<package-id>:Signer:Signer").
+    /// Must be updated if the DAR is upgraded.
+    #[arg(long, env("MPC_CANTON_SIGNER_TEMPLATE_ID"), requires = "canton_json_api_url")]
+    pub canton_signer_template_id: Option<String>,
+}
+
+impl CantonArgs {
+    pub fn into_str_args(self) -> Vec<String> {
+        let mut args = Vec::with_capacity(16);
+        if let Some(v) = self.canton_json_api_url {
+            args.extend(["--canton-json-api-url".to_string(), v]);
+        }
+        if let Some(v) = self.canton_json_api_ws_url {
+            args.extend(["--canton-json-api-ws-url".to_string(), v]);
+        }
+        if let Some(v) = self.canton_jwt_private_key_path {
+            args.extend(["--canton-jwt-private-key-path".to_string(), v]);
+        }
+        if let Some(v) = self.canton_jwt_subject {
+            args.extend(["--canton-jwt-subject".to_string(), v]);
+        }
+        if let Some(v) = self.canton_party_id {
+            args.extend(["--canton-party-id".to_string(), v]);
+        }
+        if let Some(v) = self.canton_signer_contract_id {
+            args.extend(["--canton-signer-contract-id".to_string(), v]);
+        }
+        if let Some(v) = self.canton_signer_template_id {
+            args.extend(["--canton-signer-template-id".to_string(), v]);
+        }
+        args
+    }
+
+    pub fn into_config(self) -> Option<CantonConfig> {
+        Some(CantonConfig {
+            json_api_url: self.canton_json_api_url?,
+            json_api_ws_url: self.canton_json_api_ws_url?,
+            jwt_private_key_path: self.canton_jwt_private_key_path?,
+            jwt_subject: self.canton_jwt_subject?,
+            party_id: self.canton_party_id?,
+            signer_contract_id: self.canton_signer_contract_id?,
+            signer_template_id: self.canton_signer_template_id?,
+        })
+    }
+
+    pub fn from_config(config: Option<CantonConfig>) -> Self {
+        match config {
+            Some(c) => CantonArgs {
+                canton_json_api_url: Some(c.json_api_url),
+                canton_json_api_ws_url: Some(c.json_api_ws_url),
+                canton_jwt_private_key_path: Some(c.jwt_private_key_path),
+                canton_jwt_subject: Some(c.jwt_subject),
+                canton_party_id: Some(c.party_id),
+
+                canton_signer_contract_id: Some(c.signer_contract_id),
+                canton_signer_template_id: Some(c.signer_template_id),
+            },
+            None => CantonArgs {
+                canton_json_api_url: None,
+                canton_json_api_ws_url: None,
+                
canton_jwt_private_key_path: None, + canton_jwt_subject: None, + canton_party_id: None, + + canton_signer_contract_id: None, + canton_signer_template_id: None, + }, + } + } +} + +// --------------------------------------------------------------------------- +// Signer CID discovery +// --------------------------------------------------------------------------- + +/// Discover the Signer contract ID by querying active contracts. +/// Returns (contractId, templateId) for the unique Signer:Signer contract. +pub async fn discover_signer_cid( + http_client: &reqwest::Client, + json_api_url: &str, + jwt_token: &str, + party_id: &str, +) -> anyhow::Result<(String, String)> { + let url = format!("{json_api_url}/v2/state/active-contracts"); + + let mut filters_by_party = serde_json::Map::new(); + filters_by_party.insert(party_id.to_string(), serde_json::json!({})); + + let body = ledger_api::GetActiveContractsRequest { + active_at_offset: 0, + event_format: ledger_api::EventFormat { + filters_by_party, + verbose: false, + }, + }; + + let resp = http_client + .post(&url) + .bearer_auth(jwt_token) + .json(&body) + .send() + .await?; + + if !resp.status().is_success() { + let status = resp.status(); + let text = resp.text().await.unwrap_or_default(); + anyhow::bail!("active-contracts query failed: {status} {text}"); + } + + let items: Vec = resp.json().await?; + + let mut signer_contracts: Vec<(String, String)> = Vec::new(); + for item in &items { + if let Some(ledger_api::ContractEntry::JsActiveContract(active)) = &item.contract_entry { + let ce = &active.created_event; + if ledger_api::template_suffix_matches(&ce.template_id, ledger_api::templates::SIGNER) { + signer_contracts.push((ce.contract_id.clone(), ce.template_id.clone())); + } + } + } + + match signer_contracts.as_slice() { + [] => anyhow::bail!("no active Signer:Signer contract found"), + [single] => Ok(single.clone()), + _ => anyhow::bail!("expected 1 Signer:Signer contract, found {}", signer_contracts.len()), + } 
+}
+
+// ---------------------------------------------------------------------------
+// WebSocket event stream
+// ---------------------------------------------------------------------------
+
+struct CantonStreamStartState {
+    config: CantonConfig,
+    tx: mpsc::Sender<ChainEvent>,
+    backlog: Backlog,
+}
+
+pub struct CantonStream {
+    rx: mpsc::Receiver<ChainEvent>,
+    start_state: Option<CantonStreamStartState>,
+    tasks: Vec<JoinHandle<()>>,
+}
+
+impl Drop for CantonStream {
+    fn drop(&mut self) {
+        for task in &self.tasks {
+            task.abort();
+        }
+    }
+}
+
+impl CantonStream {
+    pub fn new(config: Option<CantonConfig>, backlog: Backlog) -> Option<Self> {
+        let config = match config {
+            Some(c) => c,
+            None => {
+                tracing::warn!("canton indexer is disabled");
+                return None;
+            }
+        };
+
+        let (tx, rx) = crate::stream::channel();
+
+        Some(CantonStream {
+            rx,
+            start_state: Some(CantonStreamStartState {
+                config,
+                tx,
+                backlog,
+            }),
+            tasks: Vec::new(),
+        })
+    }
+}
+
+impl ChainStream for CantonStream {
+    const CHAIN: Chain = Chain::Canton;
+
+    async fn start(&mut self) {
+        let Some(state) = self.start_state.take() else {
+            return;
+        };
+
+        let config = state.config;
+        let tx = state.tx;
+        let backlog = state.backlog;
+
+        self.tasks.push(tokio::spawn(async move {
+            run_canton_event_loop(config, tx, backlog).await;
+        }));
+    }
+
+    async fn next_event(&mut self) -> Option<ChainEvent> {
+        self.rx.recv().await
+    }
+}
+
+/// Main event loop with reconnection logic.
+async fn run_canton_event_loop(
+    config: CantonConfig,
+    tx: mpsc::Sender<ChainEvent>,
+    backlog: Backlog,
+) {
+    // Read PEM once at startup and parse the key (no re-parsing per reconnect)
+    let encoding_key = match tokio::fs::read(&config.jwt_private_key_path).await {
+        Ok(pem) => match EncodingKey::from_ec_pem(&pem) {
+            Ok(key) => key,
+            Err(e) => {
+                tracing::error!(%e, "failed to parse canton JWT private key — canton indexer disabled");
+                return;
+            }
+        },
+        Err(e) => {
+            tracing::error!(%e, "failed to read canton JWT private key — canton indexer disabled");
+            return;
+        }
+    };
+
+    // Seed counter from backlog checkpoint
+    let mut counter = backlog
+        .processed_block(Chain::Canton)
+        .await
+        .unwrap_or(0);
+
+    tracing::info!(
+        initial_offset = counter,
+        "canton event loop starting"
+    );
+
+    loop {
+        match subscribe_and_process(&config, &encoding_key, &tx, &mut counter).await {
+            Ok(()) => {
+                tracing::info!("canton WebSocket stream ended cleanly, reconnecting...");
+            }
+            Err(e) => {
+                tracing::warn!(%e, "canton WebSocket error, reconnecting in 1s...");
+            }
+        }
+        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+    }
+}
+
+/// Connect to the Canton WebSocket, subscribe, and process events until disconnection.
+async fn subscribe_and_process(
+    config: &CantonConfig,
+    encoding_key: &EncodingKey,
+    tx: &mpsc::Sender<ChainEvent>,
+    counter: &mut u64,
+) -> anyhow::Result<()> {
+    let jwt_token = generate_jwt_with_key(encoding_key, &config.jwt_subject)?;
+
+    let ws_url = format!("{}/v2/updates", config.json_api_ws_url);
+
+    // Build request with subprotocol header
+    let mut request = ws_url.into_client_request()?;
+    request.headers_mut().insert(
+        header::SEC_WEBSOCKET_PROTOCOL,
+        "daml.ws.auth".parse()?,
+    );
+    request.headers_mut().insert(
+        header::AUTHORIZATION,
+        format!("Bearer {jwt_token}").parse()?,
+    );
+
+    let (ws_stream, _) = tokio::time::timeout(
+        std::time::Duration::from_secs(30),
+        tokio_tungstenite::connect_async(request),
+    )
+    .await
+    .map_err(|_| anyhow::anyhow!("canton WebSocket connect timed out"))??;
+    let (mut write, mut read) = ws_stream.split();
+
+    tracing::info!("canton WebSocket connected");
+
+    // Send subscription message using updateFormat (Canton 3.4+).
+    // TRANSACTION_SHAPE_LEDGER_EFFECTS gives us ExercisedEvent which we use
+    // to verify the SignBidirectional choice was exercised on a Signer:Signer.
+ let mut filters_by_party = serde_json::Map::new(); + filters_by_party.insert(config.party_id.clone(), serde_json::json!({})); + + let subscribe_msg = ledger_api::GetUpdatesRequest { + begin_exclusive: *counter, + update_format: ledger_api::UpdateFormat { + include_transactions: ledger_api::TransactionFormat { + transaction_shape: "TRANSACTION_SHAPE_LEDGER_EFFECTS".to_string(), + event_format: ledger_api::EventFormatInline { + filters_by_party, + verbose: true, + }, + }, + }, + }; + write + .send(Message::Text(serde_json::to_string(&subscribe_msg)?.into())) + .await?; + + // Process incoming messages with stall watchdog (matches Solana pattern) + let stall_timeout = std::time::Duration::from_secs(60); + let mut last_ws_msg = tokio::time::Instant::now(); + let mut watchdog = tokio::time::interval(std::time::Duration::from_secs(5)); + + loop { + let msg = tokio::select! { + maybe = read.next() => { + match maybe { + Some(msg) => { + last_ws_msg = tokio::time::Instant::now(); + msg? + } + None => break, + } + } + _ = watchdog.tick() => { + if last_ws_msg.elapsed() > stall_timeout { + anyhow::bail!("canton WebSocket stalled: no message for {stall_timeout:?}"); + } + continue; + } + }; + let text = match msg { + Message::Text(t) => t, + // tokio-tungstenite auto-sends pong replies; manual Pong would double-respond + Message::Close(_) => { + tracing::info!("canton WebSocket received close frame"); + break; + } + _ => continue, + }; + + let msg: ledger_api::UpdateMessage = match serde_json::from_str(&text) { + Ok(v) => v, + Err(e) => { + tracing::warn!(%e, "failed to parse canton WebSocket message"); + continue; + } + }; + + match msg.update { + Some(ledger_api::Update::Transaction { value }) => { + *counter = value.offset; + + for event in &value.events { + process_canton_event(event, &value.events, tx, &config.party_id, &config.signer_template_id).await; + } + + // Emit Block event for checkpoint tracking + if tx.send(ChainEvent::Block(*counter)).await.is_err() { + 
tracing::error!("canton event channel closed");
+                        return Ok(());
+                    }
+                }
+                Some(ledger_api::Update::OffsetCheckpoint { value }) => {
+                    *counter = value.offset;
+                    if tx.send(ChainEvent::Block(*counter)).await.is_err() {
+                        tracing::error!("canton event channel closed");
+                        return Ok(());
+                    }
+                }
+                None => {
+                    if msg.error.is_some() {
+                        tracing::warn!(error = ?msg.error, "canton ledger stream error");
+                    }
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
+
+/// Process a single Canton event from a WebSocket transaction update.
+///
+/// `tx_events` is the full list of events in the transaction, used for
+/// defense-in-depth verification (signatory checks, ExercisedEvent check).
+async fn process_canton_event(
+    event: &ledger_api::Event,
+    tx_events: &[ledger_api::Event],
+    tx: &mpsc::Sender<ChainEvent>,
+    node_party_id: &str,
+    signer_template_id: &str,
+) {
+    let created = match event {
+        ledger_api::Event::CreatedEvent(created) => created,
+        ledger_api::Event::ArchivedEvent(_) | ledger_api::Event::ExercisedEvent(_) => return,
+    };
+
+    let template_id = &created.template_id;
+
+    if ledger_api::template_suffix_matches(template_id, ledger_api::templates::SIGN_BIDIRECTIONAL_EVENT) {
+        match parse_sign_bidirectional_event(created) {
+            Ok(canton_event) => {
+                if let Err(e) = verify_sign_event(&canton_event, created, tx_events, node_party_id, signer_template_id) {
+                    tracing::error!(%e, "canton SignBidirectionalEvent failed verification — dropping");
+                    return;
+                }
+
+                let request_id = canton_event.generate_request_id();
+                let entropy: [u8; 32] = keccak256(request_id).into();
+                let boxed: crate::stream::ops::SignatureEventBox = Box::new(canton_event);
+                match boxed.generate_sign_request(entropy) {
+                    Ok(indexed) => {
+                        if tx.send(ChainEvent::SignRequest(indexed)).await.is_err() {
+                            tracing::error!("canton event channel closed");
+                            return;
+                        }
+                    }
+                    Err(e) => {
+                        tracing::warn!(%e, "failed to generate canton sign request");
+                    }
+                }
+            }
+            Err(e) => {
+                tracing::warn!(%e, "failed to parse 
SignBidirectionalEvent"); + } + } + } else if ledger_api::template_suffix_matches(template_id, ledger_api::templates::SIGNATURE_RESPONDED_EVENT) { + match parse_signature_responded_event(created) { + Ok(responded) => { + let event = SignatureRespondedEvent::Canton(responded); + if tx.send(ChainEvent::Respond(event)).await.is_err() { + tracing::error!("canton event channel closed"); + return; + } + } + Err(e) => { + tracing::warn!(%e, "failed to parse SignatureRespondedEvent"); + } + } + } else if ledger_api::template_suffix_matches(template_id, ledger_api::templates::RESPOND_BIDIRECTIONAL_EVENT) { + match parse_respond_bidirectional_event(created) { + Ok(respond) => { + let event = RespondBidirectionalEvent::Canton(respond); + if tx.send(ChainEvent::RespondBidirectional(event)).await.is_err() { + tracing::error!("canton event channel closed"); + return; + } + } + Err(e) => { + tracing::warn!(%e, "failed to parse RespondBidirectionalEvent"); + } + } + } +} + +// --------------------------------------------------------------------------- +// Defense-in-depth verification (mirrors canton-mpc-poc TS tx-handler.ts) +// --------------------------------------------------------------------------- + +/// Verify a SignBidirectionalEvent before processing it. +/// +/// These checks are defense-in-depth on top of the Daml ledger guarantees: +/// 1. Operators from the payload must be actual signatories on the CreatedEvent +/// 2. Requester must be a signatory +/// 3. 
An ExercisedEvent with choice "SignBidirectional" on Signer:Signer must +/// exist in the same transaction — proves the event was created through the +/// correct Daml code path, not fabricated +fn verify_sign_event( + event: &contracts::SignBidirectionalRequestedEvent, + created: &ledger_api::CreatedEvent, + tx_events: &[ledger_api::Event], + node_party_id: &str, + signer_template_id: &str, +) -> anyhow::Result<()> { + // Check 0: sig_network must match this node's party ID + if event.sig_network != node_party_id { + anyhow::bail!( + "sig_network {} does not match node party_id {node_party_id} — event is for a different MPC network", + event.sig_network + ); + } + + let signatories: HashSet<&str> = created.signatories.iter().map(|s| s.as_str()).collect(); + + // Check 1: operators must be signatories (hard error) + for op in &event.operators { + if !signatories.contains(op.as_str()) { + anyhow::bail!( + "operator {op} is in contract payload but not in CreatedEvent.signatories — possible forgery" + ); + } + } + + // Check 2: requester must be a signatory (hard error) + if !signatories.contains(event.requester.as_str()) { + anyhow::bail!( + "requester {} is not in CreatedEvent.signatories — possible forgery", + event.requester + ); + } + + // Check 3: ExercisedEvent with choice "SignBidirectional" on the pinned + // Signer template must exist in the same transaction. Exact template ID + // match (not suffix) since the operator pinned it via CLI. + let has_exercise = tx_events.iter().any(|e| matches!( + e, + ledger_api::Event::ExercisedEvent(ex) + if ex.choice == "SignBidirectional" && ex.template_id == signer_template_id + )); + if !has_exercise { + anyhow::bail!( + "no ExercisedEvent with choice SignBidirectional on {signer_template_id} found in transaction" + ); + } + + // Check 4: nonceCidText must correspond to a consuming ExercisedEvent on a + // SigningNonce template in the same transaction. 
With LEDGER_EFFECTS, nonce + // archival appears as a consuming exercise (not an ArchivedEvent). + // This ensures: (a) the nonce was actually consumed (replay prevention), + // and (b) it's a SigningNonce — not an arbitrary string. + let nonce_cid = &event.nonce_cid_text; + if nonce_cid.is_empty() { + anyhow::bail!("nonceCidText is empty — malformed SignBidirectionalEvent"); + } + let nonce_consumed = tx_events.iter().any(|e| matches!( + e, + ledger_api::Event::ExercisedEvent(ex) + if ex.consuming + && ex.contract_id == *nonce_cid + && ledger_api::template_suffix_matches(&ex.template_id, ledger_api::templates::SIGNING_NONCE) + )); + if !nonce_consumed { + anyhow::bail!( + "nonceCidText {nonce_cid} does not match any consuming ExercisedEvent on SigningNonce in the transaction — possible replay or forged nonce" + ); + } + + Ok(()) +} + +// --------------------------------------------------------------------------- +// Event parsing from Canton JSON payloads +// --------------------------------------------------------------------------- + +fn parse_sign_bidirectional_event( + created: &ledger_api::CreatedEvent, +) -> anyhow::Result { + let event: contracts::SignBidirectionalRequestedEvent = + serde_json::from_value(created.payload.clone())?; + Ok(event) +} + +fn parse_signature_responded_event( + created: &ledger_api::CreatedEvent, +) -> anyhow::Result { + let payload: contracts::SignatureRespondedEventPayload = + serde_json::from_value(created.payload.clone())?; + + let request_id: [u8; 32] = payload.request_id.parse::() + .map_err(|e| anyhow::anyhow!("invalid request_id hex: {e}"))?.0; + let signature = parse_der_signature(&payload.signature)?; + + Ok(CantonSignatureRespondedEvent { + request_id, + responder: payload.responder, + signature, + }) +} + +fn parse_respond_bidirectional_event( + created: &ledger_api::CreatedEvent, +) -> anyhow::Result { + let payload: contracts::RespondBidirectionalEventPayload = + serde_json::from_value(created.payload.clone())?; + + 
let request_id: [u8; 32] = payload.request_id.parse::() + .map_err(|e| anyhow::anyhow!("invalid request_id hex: {e}"))?.0; + let serialized_output = hex::decode(&payload.serialized_output) + .map_err(|e| anyhow::anyhow!("invalid serializedOutput hex: {e}"))?; + let signature = parse_der_signature(&payload.signature)?; + + Ok(CantonRespondBidirectionalEvent { + request_id, + responder: payload.responder, + serialized_output, + signature, + }) +} + +/// Parse a DER-encoded ECDSA signature (hex string) back into an MPC Signature. +/// +/// Canton emits signatures in DER format (see [`der_encode_signature`] for why). +/// We extract r and s via k256's DER parser, then reconstruct `big_r` by +/// decompressing the r scalar as a secp256k1 x-coordinate with even parity. +/// +/// **Important:** DER does not encode the recovery ID (y-parity). The returned +/// `recovery_id` defaults to `0` (even parity) and may be incorrect for ~50% +/// of signatures. Callers that need the correct recovery ID must determine it +/// themselves by recovering the public key from the message hash — see +/// [`crate::kdf::into_signature`] for the canonical approach. +fn parse_der_signature(hex_str: &str) -> anyhow::Result { + use k256::elliptic_curve::sec1::FromEncodedPoint; + use k256::EncodedPoint; + + let stripped = hex_str.strip_prefix("0x").unwrap_or(hex_str); + let bytes = hex::decode(stripped) + .map_err(|e| anyhow::anyhow!("invalid DER hex: {e}"))?; + + let ecdsa_sig = k256::ecdsa::Signature::from_der(&bytes) + .map_err(|e| anyhow::anyhow!("invalid DER signature: {e}"))?; + + let (r_scalar, s_scalar) = ecdsa_sig.split_scalars(); + let r_bytes = r_scalar.to_bytes(); + let s_bytes: [u8; 32] = s_scalar.to_bytes().into(); + let s = ::from_bytes(s_bytes) + .ok_or_else(|| anyhow::anyhow!("s is not a valid scalar"))?; + + // Reconstruct big_r from the x-coordinate with even parity (0x02). 
+ // Both parities always yield valid secp256k1 points, so the old loop + // that checked `from_encoded_point` was a no-op — it always took the + // first branch. The actual recovery_id must be resolved later against + // the expected public key and message hash. + let mut compressed = [0u8; 33]; + compressed[0] = 0x02; // even parity + compressed[1..].copy_from_slice(&r_bytes); + let encoded = EncodedPoint::from_bytes(&compressed) + .map_err(|e| anyhow::anyhow!("r is not valid compressed point bytes: {e}"))?; + let big_r = Option::from(k256::AffinePoint::from_encoded_point(&encoded)) + .ok_or_else(|| anyhow::anyhow!("r is not a valid point on secp256k1"))?; + + Ok(Signature { + big_r, + s, + recovery_id: 0, + }) +} diff --git a/chain-signatures/node/src/lib.rs b/chain-signatures/node/src/lib.rs index d099fbce3..5f8485588 100644 --- a/chain-signatures/node/src/lib.rs +++ b/chain-signatures/node/src/lib.rs @@ -7,6 +7,7 @@ pub mod gcp; pub mod indexer; pub mod indexer_eth; +pub mod indexer_canton; pub mod indexer_hydration; pub mod indexer_sol; pub mod kdf; diff --git a/chain-signatures/node/src/respond_bidirectional.rs b/chain-signatures/node/src/respond_bidirectional.rs index 8b63c8757..62658bd0b 100644 --- a/chain-signatures/node/src/respond_bidirectional.rs +++ b/chain-signatures/node/src/respond_bidirectional.rs @@ -1,7 +1,6 @@ use crate::indexer_eth::EthereumClient; use crate::protocol::{Chain, IndexedSignRequest}; -use crate::sign_bidirectional::BidirectionalTx; -use crate::sign_bidirectional::BidirectionalTxId; +use crate::sign_bidirectional::{BidirectionalTx, BidirectionalTxId, ChainContext}; use crate::sign_bidirectional::TransactionOutput; use alloy::consensus::Transaction; use alloy::primitives::Bytes; @@ -13,8 +12,7 @@ use std::sync::Arc; const MAGIC_ERROR_PREFIX: [u8; 4] = [0xde, 0xad, 0xbe, 0xef]; const SOLANA_RESPOND_BIDIRECTIONAL_PATH: &str = "solana response key"; const HYDRATION_RESPOND_BIDIRECTIONAL_PATH: &str = "hydration response key"; -// Use 
Borsh as this is what we are using for solana -pub(crate) const RESPOND_SERIALIZATION_FORMAT: SerDeserFormat = SerDeserFormat::Borsh; +const CANTON_RESPOND_BIDIRECTIONAL_PATH: &str = "canton response key"; // Use Abi as this is what we are using for ethereum pub(crate) const OUTPUT_DESERIALIZATION_FORMAT: SerDeserFormat = SerDeserFormat::Abi; @@ -24,6 +22,14 @@ pub enum SerDeserFormat { Abi, } +fn respond_serialization_format(chain: Chain) -> SerDeserFormat { + match chain { + Chain::Canton => SerDeserFormat::Abi, + // Solana, Hydration use Borsh + _ => SerDeserFormat::Borsh, + } +} + pub struct CompletedTx { tx: BidirectionalTx, block_number: u64, @@ -33,6 +39,8 @@ pub struct CompletedTx { pub struct RespondBidirectionalTx { pub tx_id: BidirectionalTxId, pub output: RespondBidirectionalSerializedOutput, + #[serde(default)] + pub chain_ctx: ChainContext, } pub type RespondBidirectionalSerializedOutput = Vec; @@ -60,7 +68,7 @@ impl CompletedTx { async fn process_failed_tx(&self, chain: Chain) -> anyhow::Result { tracing::info!("Tx failed: {:?}", self.tx.id); - let respond_serialization_format = RESPOND_SERIALIZATION_FORMAT; + let respond_serialization_format = respond_serialization_format(chain); let mut output = Vec::new(); output.extend_from_slice(&MAGIC_ERROR_PREFIX); let serialized_output: Vec = match respond_serialization_format { @@ -105,6 +113,7 @@ impl CompletedTx { let path = match chain { Chain::Solana => SOLANA_RESPOND_BIDIRECTIONAL_PATH.to_string(), Chain::Hydration => HYDRATION_RESPOND_BIDIRECTIONAL_PATH.to_string(), + Chain::Canton => CANTON_RESPOND_BIDIRECTIONAL_PATH.to_string(), _ => anyhow::bail!("Unsupported chain: {}", chain), }; let epsilon = self.tx.epsilon(&path)?; @@ -123,6 +132,7 @@ impl CompletedTx { RespondBidirectionalTx { tx_id: self.tx.id, output: serialized_output, + chain_ctx: self.tx.chain_ctx.clone(), }, )) } @@ -157,7 +167,7 @@ impl CompletedTx { _ => TransactionOutput::non_function_call_output(), }; - let 
respond_serialization_format = RESPOND_SERIALIZATION_FORMAT; + let respond_serialization_format = respond_serialization_format(tx.source_chain); let respond_serialization_schema = &tx.respond_serialization_schema; let serialized_output = transaction_output .output diff --git a/chain-signatures/node/src/rpc.rs b/chain-signatures/node/src/rpc.rs index 7ee4b881a..961fb03b0 100644 --- a/chain-signatures/node/src/rpc.rs +++ b/chain-signatures/node/src/rpc.rs @@ -43,6 +43,7 @@ use std::time::{Duration, Instant}; use tokio::sync::{mpsc, watch}; use url::Url; +use crate::indexer_canton::CantonConfig; use crate::indexer_hydration::HydrationConfig; use parity_scale_codec::{Decode, Encode}; use subxt::config::substrate::{ @@ -353,6 +354,7 @@ pub struct RpcExecutor { eth: Option, solana: Option, hydration: Option, + canton: Option, action_rx: mpsc::Receiver, backlog: Backlog, } @@ -363,6 +365,7 @@ impl RpcExecutor { eth: &Option, solana: &Option, hydration: &Option, + canton: &Option, backlog: Backlog, ) -> (RpcChannel, Self) { let eth = eth.as_ref().map(EthClient::new); @@ -377,6 +380,16 @@ impl RpcExecutor { }, None => None, }; + let canton = match canton { + Some(c) => match CantonClient::new(c).await { + Ok(client) => Some(client), + Err(e) => { + tracing::error!(%e, "failed to create canton client"); + None + } + }, + None => None, + }; let (tx, rx) = mpsc::channel(MAX_CONCURRENT_RPC_REQUESTS); ( RpcChannel { tx }, @@ -385,6 +398,7 @@ impl RpcExecutor { eth, solana, hydration, + canton, action_rx: rx, backlog, }, @@ -433,7 +447,7 @@ impl RpcExecutor { tokio::spawn(async move { match chain { - Chain::NEAR | Chain::Solana | Chain::Hydration => { + Chain::NEAR | Chain::Solana | Chain::Hydration | Chain::Canton => { execute_publish(client, action, backlog).await; } Chain::Ethereum => { @@ -477,6 +491,13 @@ impl RpcExecutor { ChainClient::Err("no hydration client available for node") } } + Chain::Canton => { + if let Some(canton) = &self.canton { + 
ChainClient::Canton(canton.clone()) + } else { + ChainClient::Err("no canton client available for node") + } + } Chain::Bitcoin => ChainClient::Err("no bitcoin client available for node"), } } @@ -893,6 +914,65 @@ impl HydrationClient { } } +#[derive(Clone)] +pub struct CantonClient { + http_client: reqwest::Client, + json_api_url: String, + /// Pre-parsed encoding key — parsed once at construction, reused for every JWT. + encoding_key: jsonwebtoken::EncodingKey, + jwt_subject: String, + party_id: String, + signer_cid: String, + /// The full templateId of the discovered Signer contract (includes package hash). + signer_template_id: String, +} + +impl std::fmt::Debug for CantonClient { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("CantonClient") + .field("json_api_url", &self.json_api_url) + .field("encoding_key", &"<redacted>") + .field("jwt_subject", &self.jwt_subject) + .field("party_id", &self.party_id) + .field("signer_cid", &self.signer_cid) + .field("signer_template_id", &self.signer_template_id) + .finish() + } +} + +impl CantonClient { + pub async fn new(config: &CantonConfig) -> anyhow::Result<Self> { + let http_client = reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(30)) + .build()?; + + let jwt_pem = tokio::fs::read(&config.jwt_private_key_path).await?; + let encoding_key = jsonwebtoken::EncodingKey::from_ec_pem(&jwt_pem)?; + + tracing::info!( + signer_cid = %config.signer_contract_id, + signer_template_id = %config.signer_template_id, + "canton Signer contract configured" + ); + + Ok(Self { + http_client, + json_api_url: config.json_api_url.clone(), + encoding_key, + jwt_subject: config.jwt_subject.clone(), + party_id: config.party_id.clone(), + signer_cid: config.signer_contract_id.clone(), + signer_template_id: config.signer_template_id.clone(), + }) + } + + /// Generate a fresh JWT token using the pre-parsed encoding key. 
+ fn generate_jwt(&self) -> anyhow::Result<String> { + crate::indexer_canton::generate_jwt_with_key(&self.encoding_key, &self.jwt_subject) + } + +} + /// Client related to a specific chain #[allow(clippy::large_enum_variant)] pub enum ChainClient { @@ -901,6 +981,7 @@ pub enum ChainClient { Ethereum(EthClient), Solana(SolanaClient), Hydration(HydrationClient), + Canton(CantonClient), } async fn update_contract(near: NearClient, contract: watch::Sender>) { @@ -971,6 +1052,11 @@ async fn execute_publish(client: ChainClient, action: PublishAction, backlog: Ba .await .map_err(|_| ()) } + ChainClient::Canton(canton) => { + try_publish_canton(canton, &action, &action.timestamp, &action.signature) + .await + .map_err(|_| ()) + } ChainClient::Err(msg) => { tracing::error!(msg, "no client for chain"); Ok(()) @@ -1547,6 +1633,10 @@ async fn execute_batch_publish(client: &ChainClient, actions: &mut Vec<PublishAction> + ChainClient::Canton(_) => { + tracing::error!("Canton does not support batch publish"); + Ok(()) + } ChainClient::Err(msg) => { tracing::error!(msg, "no client for chain"); Ok(()) @@ -1764,6 +1854,155 @@ +async fn try_publish_canton( + canton: &CantonClient, + action: &PublishAction, + timestamp: &std::time::Instant, + signature: &mpc_primitives::Signature, +) -> anyhow::Result<()> { + let chain = action.indexed.chain; + let sign_id = action.indexed.id; + let request_id_hex = hex::encode(action.indexed.id.request_id); + + tracing::info!( + ?sign_id, + ?chain, + elapsed = ?timestamp.elapsed(), + request_id = %request_id_hex, + "canton: publishing signature" + ); + + let jwt_token = canton.generate_jwt()?; + let url = format!( + "{}/v2/commands/submit-and-wait-for-transaction", + canton.json_api_url + ); + let der_sig = hex::encode(crate::indexer_canton::der_encode_signature(signature)?); + + match &action.indexed.kind { + SignKind::SignBidirectional(_) => { + // Extract operators and requester from the Canton event + let (operators, requester) = 
extract_canton_operators_requester(action)?; + + let body = serde_json::json!({ + "commands": { + "commands": [{ + "ExerciseCommand": { + "templateId": canton.signer_template_id, + "contractId": canton.signer_cid, + "choice": "Respond", + "choiceArgument": { + "operators": operators, + "requester": requester, + "requestId": request_id_hex, + "signature": der_sig, + } + } + }], + "commandId": format!("mpc-respond-{}", request_id_hex), + "userId": canton.jwt_subject, + "actAs": [&canton.party_id], + "readAs": [&canton.party_id], + } + }); + + let resp = canton + .http_client + .post(&url) + .bearer_auth(&jwt_token) + .json(&body) + .send() + .await?; + + if !resp.status().is_success() { + let status = resp.status(); + let text = resp.text().await.unwrap_or_default(); + anyhow::bail!("canton Respond failed: {status} {text}"); + } + + tracing::info!( + ?sign_id, + elapsed = ?timestamp.elapsed(), + "published canton Respond signature successfully" + ); + } + SignKind::RespondBidirectional(respond_bidirectional_tx) => { + let (operators, requester) = extract_canton_operators_requester(action)?; + let serialized_output = hex::encode(&respond_bidirectional_tx.output); + + let body = serde_json::json!({ + "commands": { + "commands": [{ + "ExerciseCommand": { + "templateId": canton.signer_template_id, + "contractId": canton.signer_cid, + "choice": "RespondBidirectional", + "choiceArgument": { + "operators": operators, + "requester": requester, + "requestId": request_id_hex, + "serializedOutput": serialized_output, + "signature": der_sig, + } + } + }], + "commandId": format!("mpc-respond-bidir-{}", request_id_hex), + "userId": canton.jwt_subject, + "actAs": [&canton.party_id], + "readAs": [&canton.party_id], + } + }); + + let resp = canton + .http_client + .post(&url) + .bearer_auth(&jwt_token) + .json(&body) + .send() + .await?; + + if !resp.status().is_success() { + let status = resp.status(); + let text = resp.text().await.unwrap_or_default(); + anyhow::bail!("canton 
RespondBidirectional failed: {status} {text}"); + } + + tracing::info!( + ?sign_id, + elapsed = ?timestamp.elapsed(), + "published canton RespondBidirectional successfully" + ); + } + SignKind::Sign => { + anyhow::bail!("Canton does not support SignKind::Sign — only SignBidirectional"); + } + } + + Ok(()) +} + +/// Extract operators and requester (sender) from a Canton publish action. +/// Phase 1 (Respond): reads from the CantonSignBidirectionalRequestedEvent on SignKind. +/// Phase 2 (RespondBidirectional): reads from the BidirectionalTx fields populated +/// when the tx was first created from the Canton sign event. +fn extract_canton_operators_requester( + action: &PublishAction, +) -> anyhow::Result<(Vec, String)> { + use crate::sign_bidirectional::ChainContext; + match &action.indexed.kind { + SignKind::SignBidirectional( + crate::stream::ops::SignBidirectionalEvent::Canton(event), + ) => Ok((event.operators.clone(), event.requester.clone())), + SignKind::RespondBidirectional(respond_tx) => { + let ChainContext::Canton { ref operators, ref requester, .. 
} = respond_tx.chain_ctx else { + anyhow::bail!("missing ChainContext on RespondBidirectionalTx"); + }; + Ok((operators.clone(), requester.clone())) + } + _ => anyhow::bail!("expected Canton event variant"), + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/chain-signatures/node/src/sign_bidirectional.rs b/chain-signatures/node/src/sign_bidirectional.rs index 2596f9e41..1c5e1d6a2 100644 --- a/chain-signatures/node/src/sign_bidirectional.rs +++ b/chain-signatures/node/src/sign_bidirectional.rs @@ -2,14 +2,18 @@ use crate::protocol::{Chain, IndexedSignRequest}; use crate::respond_bidirectional::SerDeserFormat; use alloy::primitives::{keccak256, Address, Bytes, B256, I256, U256}; use alloy_dyn_abi::{DynSolType, DynSolValue}; +use anyhow::Context; use borsh::BorshSerialize; +use k256::ecdsa::RecoveryId; use k256::elliptic_curve::point::AffineCoordinates; +use k256::elliptic_curve::sec1::ToEncodedPoint; use k256::{AffinePoint, Scalar}; -use mpc_crypto::derive_key; +use mpc_crypto::{derive_key, kdf::recover}; use mpc_primitives::Signature; use rlp::{Rlp, RlpStream}; use serde_json::Value; use sha3::{Digest, Keccak256}; + use std::collections::HashMap; use std::io::Write; @@ -43,6 +47,23 @@ pub enum SignStatus { Success, } +/// Chain-specific context carried through the bidirectional signing flow. 
+#[derive(Debug, Clone, Hash, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub enum ChainContext { + None, + Canton { + operators: Vec<String>, + requester: String, + sender: String, + }, +} + +impl Default for ChainContext { + fn default() -> Self { + Self::None + } +} + #[derive(Debug, Clone, Hash, serde::Serialize, serde::Deserialize)] pub struct BidirectionalTx { pub id: BidirectionalTxId, @@ -63,6 +84,8 @@ pub struct BidirectionalTx { pub from_address: Address, pub nonce: u64, pub status: SignStatus, + #[serde(default)] + pub chain_ctx: ChainContext, } impl BidirectionalTx { @@ -82,6 +105,16 @@ impl BidirectionalTx { &self.sender_string()?, path, )), + Chain::Canton => { + let ChainContext::Canton { ref sender, .. } = self.chain_ctx else { + anyhow::bail!("Canton BidirectionalTx missing ChainContext"); + }; + Ok(mpc_crypto::kdf::derive_epsilon_canton( + self.key_version, + sender, + path, + )) + } _ => anyhow::bail!("Unsupported chain: {}", self.source_chain), } } @@ -224,6 +257,42 @@ pub fn decode_rlp(rlp_data: Vec<u8>, is_eip1559: bool) -> anyhow::Result +pub fn resolve_signature_recovery_id( + unsigned_rlp: &[u8], + mut signature: Signature, + derived_public_key: &AffinePoint, +) -> anyhow::Result<Signature> { + let msg_hash: [u8; 32] = keccak256(unsigned_rlp).into(); + let expected_pk = derived_public_key.to_encoded_point(false); + + let ecdsa_sig = + k256::ecdsa::Signature::from_scalars( + mpc_crypto::x_coordinate(&signature.big_r), + &signature.s, + ) + .context("cannot create ECDSA signature from (r, s) scalars")?; + + for rid in [0u8, 1u8] { + let recovery_id = RecoveryId::try_from(rid) + .with_context(|| format!("cannot create recovery_id={rid}"))?; + if let Ok(recovered) = recover(&msg_hash, &ecdsa_sig, recovery_id) { + if expected_pk == recovered.to_encoded_point(false) { + signature.recovery_id = rid; + return Ok(signature); + } + } + } + + anyhow::bail!("cannot determine recovery_id: neither 0 nor 1 recovers the expected public key") +} + pub fn sign_and_hash_transaction( unsigned_rlp: &[u8], signature: Signature, @@ -435,7 +504,7 @@ fn serialize_dynsol(w: &mut W, v: &DynSolValue) -> 
anyhow::Result<()> } } - // -------- Tuple -------- + // -------- Tuple -------- // Concatenate members Tuple(xs) => { for x in xs { diff --git a/chain-signatures/node/src/stream/ops.rs b/chain-signatures/node/src/stream/ops.rs index f00b580c9..f5ad43b4b 100644 --- a/chain-signatures/node/src/stream/ops.rs +++ b/chain-signatures/node/src/stream/ops.rs @@ -1,4 +1,8 @@ use crate::backlog::{Backlog, BacklogEntry, RecoveredChainRequests, RecoveryRequeueMode}; +use crate::indexer_canton::{ + CantonRespondBidirectionalEvent, CantonSignBidirectionalRequestedEvent, + CantonSignatureRespondedEvent, +}; use crate::indexer_hydration::{ HydrationRespondBidirectionalEvent, HydrationSignBidirectionalRequestedEvent, HydrationSignatureRespondedEvent, @@ -9,9 +13,10 @@ use crate::node_client::NodeClient; use crate::protocol::{Chain, IndexedSignRequest, Sign, SignKind}; use crate::respond_bidirectional::CompletedTx; use crate::rpc::ContractStateWatcher; -use crate::sign_bidirectional::{BidirectionalTx, BidirectionalTxId, SignStatus}; +use crate::sign_bidirectional::{BidirectionalTx, BidirectionalTxId, ChainContext, SignStatus}; use crate::stream::ExecutionOutcome; +use alloy::primitives::keccak256; use anchor_lang::prelude::Pubkey; use k256::Scalar; use mpc_primitives::{SignId, Signature}; @@ -22,6 +27,7 @@ use tokio::sync::{mpsc, watch}; pub enum SignBidirectionalEvent { Solana(signet_program::SignBidirectionalEvent), Hydration(HydrationSignBidirectionalRequestedEvent), + Canton(CantonSignBidirectionalRequestedEvent), } impl SignBidirectionalEvent { @@ -29,6 +35,9 @@ impl SignBidirectionalEvent { match self { SignBidirectionalEvent::Solana(event) => event.sender.to_bytes(), SignBidirectionalEvent::Hydration(event) => event.sender, + SignBidirectionalEvent::Canton(event) => { + keccak256(event.sender.as_bytes()).into() + } } } @@ -40,6 +49,7 @@ impl SignBidirectionalEvent { match self { SignBidirectionalEvent::Solana(_) => Chain::Solana, SignBidirectionalEvent::Hydration(_) => 
Chain::Hydration, + SignBidirectionalEvent::Canton(_) => Chain::Canton, } } @@ -47,6 +57,7 @@ impl SignBidirectionalEvent { match self { SignBidirectionalEvent::Solana(event) => Chain::from_str(&event.dest).ok(), SignBidirectionalEvent::Hydration(event) => Chain::from_str(&event.dest).ok(), + SignBidirectionalEvent::Canton(event) => Chain::from_str(&event.dest).ok(), } } @@ -54,6 +65,7 @@ impl SignBidirectionalEvent { match self { SignBidirectionalEvent::Solana(event) => event.path.clone(), SignBidirectionalEvent::Hydration(event) => event.path.clone(), + SignBidirectionalEvent::Canton(event) => event.path.clone(), } } @@ -61,6 +73,7 @@ impl SignBidirectionalEvent { match self { SignBidirectionalEvent::Solana(event) => event.dest.clone(), SignBidirectionalEvent::Hydration(event) => event.dest.clone(), + SignBidirectionalEvent::Canton(event) => event.dest.clone(), } } @@ -68,6 +81,7 @@ impl SignBidirectionalEvent { match self { SignBidirectionalEvent::Solana(event) => event.algo.clone(), SignBidirectionalEvent::Hydration(event) => event.algo.clone(), + SignBidirectionalEvent::Canton(event) => event.algo.clone(), } } @@ -75,6 +89,7 @@ impl SignBidirectionalEvent { match self { SignBidirectionalEvent::Solana(event) => event.params.clone(), SignBidirectionalEvent::Hydration(event) => event.params.clone(), + SignBidirectionalEvent::Canton(event) => event.params.clone(), } } @@ -82,6 +97,9 @@ impl SignBidirectionalEvent { match self { SignBidirectionalEvent::Solana(event) => event.output_deserialization_schema.clone(), SignBidirectionalEvent::Hydration(event) => event.output_deserialization_schema.clone(), + SignBidirectionalEvent::Canton(event) => { + event.output_deserialization_schema.as_bytes().to_vec() + } } } @@ -89,6 +107,9 @@ impl SignBidirectionalEvent { match self { SignBidirectionalEvent::Solana(event) => event.respond_serialization_schema.clone(), SignBidirectionalEvent::Hydration(event) => event.respond_serialization_schema.clone(), + 
SignBidirectionalEvent::Canton(event) => { + event.respond_serialization_schema.as_bytes().to_vec() + } } } @@ -96,6 +117,7 @@ impl SignBidirectionalEvent { match self { SignBidirectionalEvent::Solana(event) => event.key_version, SignBidirectionalEvent::Hydration(event) => event.key_version, + SignBidirectionalEvent::Canton(event) => event.key_version, } } @@ -103,6 +125,7 @@ impl SignBidirectionalEvent { match self { SignBidirectionalEvent::Solana(event) => event.deposit, SignBidirectionalEvent::Hydration(event) => event.deposit, + SignBidirectionalEvent::Canton(_) => 0, } } @@ -110,6 +133,9 @@ impl SignBidirectionalEvent { match self { SignBidirectionalEvent::Solana(event) => event.serialized_transaction.clone(), SignBidirectionalEvent::Hydration(event) => event.serialized_transaction.clone(), + SignBidirectionalEvent::Canton(event) => { + crate::indexer_canton::rlp_encode_unsigned_eip1559(&event.evm_tx_params) + } } } @@ -117,6 +143,7 @@ impl SignBidirectionalEvent { match self { SignBidirectionalEvent::Solana(event) => event.caip2_id.clone(), SignBidirectionalEvent::Hydration(event) => event.caip2_id.clone(), + SignBidirectionalEvent::Canton(event) => event.caip2_id.clone(), } } @@ -132,6 +159,11 @@ impl SignBidirectionalEvent { &self.sender_string()?, &self.path(), )), + SignBidirectionalEvent::Canton(event) => Ok(mpc_crypto::kdf::derive_epsilon_canton( + self.key_version(), + &event.sender, + &self.path(), + )), } } } @@ -139,6 +171,7 @@ impl SignBidirectionalEvent { pub enum RespondBidirectionalEvent { Solana(signet_program::RespondBidirectionalEvent), Hydration(HydrationRespondBidirectionalEvent), + Canton(CantonRespondBidirectionalEvent), } impl RespondBidirectionalEvent { @@ -146,6 +179,7 @@ impl RespondBidirectionalEvent { match self { RespondBidirectionalEvent::Solana(event) => event.request_id, RespondBidirectionalEvent::Hydration(event) => event.request_id, + RespondBidirectionalEvent::Canton(event) => event.request_id, } } @@ -153,6 +187,9 @@ impl 
RespondBidirectionalEvent { match self { RespondBidirectionalEvent::Solana(event) => event.responder.to_bytes(), RespondBidirectionalEvent::Hydration(event) => event.responder, + RespondBidirectionalEvent::Canton(event) => { + keccak256(event.responder.as_bytes()).into() + } } } @@ -160,6 +197,7 @@ impl RespondBidirectionalEvent { match self { RespondBidirectionalEvent::Solana(event) => event.serialized_output.clone(), RespondBidirectionalEvent::Hydration(event) => event.serialized_output.clone(), + RespondBidirectionalEvent::Canton(event) => event.serialized_output.clone(), } } @@ -169,6 +207,7 @@ impl RespondBidirectionalEvent { crate::indexer_sol::to_mpc_signature(event.signature.clone()).unwrap() } RespondBidirectionalEvent::Hydration(event) => event.signature, + RespondBidirectionalEvent::Canton(event) => event.signature, } } @@ -176,6 +215,7 @@ impl RespondBidirectionalEvent { match self { RespondBidirectionalEvent::Solana(_) => Chain::Solana, RespondBidirectionalEvent::Hydration(_) => Chain::Hydration, + RespondBidirectionalEvent::Canton(_) => Chain::Canton, } } } @@ -195,6 +235,7 @@ pub enum SignatureRespondedEvent { /// Minimal Ethereum respond event representation (used to emit Respond events /// from the Ethereum indexer without performing backlog mutations in the client). 
Ethereum(EthereumSignatureRespondedEvent), + Canton(CantonSignatureRespondedEvent), } impl SignatureRespondedEvent { @@ -203,6 +244,7 @@ impl SignatureRespondedEvent { SignatureRespondedEvent::Solana(_) => Chain::Solana, SignatureRespondedEvent::Hydration(_) => Chain::Hydration, SignatureRespondedEvent::Ethereum(_) => Chain::Ethereum, + SignatureRespondedEvent::Canton(_) => Chain::Canton, } } @@ -211,10 +253,11 @@ impl SignatureRespondedEvent { SignatureRespondedEvent::Solana(event) => event.request_id, SignatureRespondedEvent::Hydration(event) => event.request_id, SignatureRespondedEvent::Ethereum(event) => event.request_id, + SignatureRespondedEvent::Canton(event) => event.request_id, } } - /// Convert the contained event into an `mpc_primitives::Signature`. + /// Convert the contained event into an `mpc_primitives::Signature`. pub fn signature(&self) -> Signature { match self { SignatureRespondedEvent::Solana(event) => { @@ -222,6 +265,7 @@ impl SignatureRespondedEvent { } SignatureRespondedEvent::Hydration(event) => event.signature, SignatureRespondedEvent::Ethereum(event) => event.signature, + SignatureRespondedEvent::Canton(event) => event.signature, } } } @@ -392,7 +436,26 @@ pub(crate) async fn process_respond_event( anyhow::anyhow!("unable to parse target chain from dest: {}", event.dest()) })?; - let mpc_sig = respond_event.signature(); + // Get the MPC public key and derive the from_address. + // This must happen before sign_and_hash_transaction for Canton signatures + // because DER encoding loses the recovery ID — we need the derived public + // key to resolve the correct parity via ecrecover. 
+ let root_public_key = contract_watcher.wait_public_key().await; + let epsilon = event.epsilon()?; + let derived_public_key = mpc_crypto::derive_key(root_public_key, epsilon); + let from_address = crate::sign_bidirectional::derive_user_address(root_public_key, epsilon); + + let mut mpc_sig = respond_event.signature(); + + // For Canton signatures the recovery_id parsed from DER defaults to 0 + // and may be wrong. Resolve the correct value before hashing. + if source_chain == Chain::Canton { + mpc_sig = crate::sign_bidirectional::resolve_signature_recovery_id( + &event.serialized_transaction(), + mpc_sig, + &derived_public_key, + )?; + } // Sign and hash the transaction to get the correct tx_id and nonce let (signed_tx_hash, nonce) = crate::sign_bidirectional::sign_and_hash_transaction( @@ -402,11 +465,6 @@ pub(crate) async fn process_respond_event( let tx_id = BidirectionalTxId(signed_tx_hash.into()); - // Get the MPC public key and derive the from_address - let root_public_key = contract_watcher.wait_public_key().await; - let epsilon = event.epsilon()?; - let from_address = crate::sign_bidirectional::derive_user_address(root_public_key, epsilon); - let bidirectional_tx = BidirectionalTx { id: tx_id, sender: event.sender(), @@ -426,6 +484,14 @@ pub(crate) async fn process_respond_event( from_address, nonce, status: SignStatus::AwaitingResponse, + chain_ctx: match &event { + SignBidirectionalEvent::Canton(e) => ChainContext::Canton { + operators: e.operators.clone(), + requester: e.requester.clone(), + sender: e.sender.clone(), + }, + _ => ChainContext::None, + }, }; tracing::info!( @@ -523,7 +589,7 @@ pub async fn process_execution_confirmed( tracing::warn!(?tx_id, expected = ?unwatched_sign_id, actual = ?sign_id, "sign_id mismatch between event and watcher"); } - // Update the status on the source chain + // Update the status on the source chain let status = match result { ExecutionOutcome::Success { .. 
} => SignStatus::Success, ExecutionOutcome::Failed => SignStatus::Failed, @@ -569,6 +635,7 @@ pub(crate) fn sender_string(sender: [u8; 32], source_chain: Chain) -> anyhow::Re Chain::Hydration => Ok(crate::indexer_hydration::ss58_address_from_account32( sender, )), + Chain::Canton => Ok(hex::encode(sender)), _ => anyhow::bail!("Unsupported chain: {source_chain}"), } } @@ -718,6 +785,7 @@ mod tests { from_address: Address::ZERO, nonce: 0, status: SignStatus::PendingExecution, + chain_ctx: ChainContext::None, }; let sign_id = SignId::new(tx.request_id); @@ -894,6 +962,7 @@ mod tests { from_address: Address::ZERO, nonce: 0, status: SignStatus::PendingExecution, + chain_ctx: ChainContext::None, }; let sign_id = SignId::new(tx.request_id); diff --git a/chain-signatures/primitives/src/lib.rs b/chain-signatures/primitives/src/lib.rs index e835f9bfb..80d165639 100644 --- a/chain-signatures/primitives/src/lib.rs +++ b/chain-signatures/primitives/src/lib.rs @@ -6,6 +6,7 @@ use near_account_id::AccountId; use near_sdk::borsh::{BorshDeserialize, BorshSerialize}; use near_sdk::serde::{Deserialize, Serialize}; use sha3::Digest; +use std::sync::LazyLock; use std::{fmt, str::FromStr}; use crate::bytes::cbor_scalar; @@ -35,6 +36,17 @@ impl ScalarExt for Scalar { } } +/// The maximum valid scalar for the secp256k1 curve (group order minus one). 
+pub static MAX_SECP256K1_SCALAR: LazyLock<Scalar> = LazyLock::new(|| {
+    Scalar::from_bytes(
+        hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364140")
+            .unwrap()
+            .try_into()
+            .unwrap(),
+    )
+    .unwrap()
+});
+
 pub const LATEST_MPC_KEY_VERSION: u32 = 1;
 pub const LEGACY_MPC_KEY_VERSION_0: u32 = 0;
 
@@ -155,6 +167,7 @@ pub enum Chain {
     Solana,
     Bitcoin,
     Hydration,
+    Canton,
 }
 
 impl Chain {
@@ -165,16 +178,18 @@ impl Chain {
             Chain::Solana => "Solana",
             Chain::Bitcoin => "Bitcoin",
             Chain::Hydration => "Hydration",
+            Chain::Canton => "Canton",
         }
     }
 
-    pub const fn iter() -> [Chain; 5] {
+    pub const fn iter() -> [Chain; 6] {
         [
             Chain::NEAR,
             Chain::Ethereum,
             Chain::Solana,
             Chain::Bitcoin,
             Chain::Hydration,
+            Chain::Canton,
         ]
     }
 
@@ -185,6 +200,7 @@ impl Chain {
             Chain::Solana => "0x800001f5",
             Chain::Bitcoin => "bip122:000000000019d6689c085ae165831e93",
             Chain::Hydration => "polkadot:2034",
+            Chain::Canton => "canton:global",
         }
     }
 
@@ -195,6 +211,10 @@ impl Chain {
             Chain::Solana => "solana:5eykt4UsFv8P8NJdTREpY1vzqKqZKvdp",
             Chain::Bitcoin => "bip122:000000000019d6689c085ae165831e93",
             Chain::Hydration => "polkadot:2034",
+            // Synthetic — Canton has no registered CAIP-2 namespace in
+            // ChainAgnostic/namespaces. "canton:global" follows the
+            // namespace:reference format as a project-local identifier.
+ Chain::Canton => "canton:global", } } @@ -204,6 +224,7 @@ impl Chain { Chain::Ethereum => ("CHECKPOINT_INTERVAL_ETHEREUM", 20), Chain::Solana => ("CHECKPOINT_INTERVAL_SOLANA", 120), Chain::Hydration => ("CHECKPOINT_INTERVAL_HYDRATION", 240), + Chain::Canton => ("CHECKPOINT_INTERVAL_CANTON", 50), }; let interval = std::env::var(key) @@ -218,6 +239,7 @@ impl Chain { ("CHECKPOINT_INTERVAL_ETHEREUM", "2"), ("CHECKPOINT_INTERVAL_SOLANA", "5"), ("CHECKPOINT_INTERVAL_HYDRATION", "5"), + ("CHECKPOINT_INTERVAL_CANTON", "5"), ] } @@ -228,6 +250,7 @@ impl Chain { Chain::Solana => 3, Chain::Bitcoin => 60 * 60 + 20 * 60, // 6 confirmations at 10 minutes each, plus some buffer Chain::Hydration => 12, + Chain::Canton => 5, } } @@ -252,6 +275,7 @@ impl FromStr for Chain { "solana" | "sol" => Ok(Chain::Solana), "bitcoin" | "btc" => Ok(Chain::Bitcoin), "hydration" | "hyd" => Ok(Chain::Hydration), + "canton" | "ctn" => Ok(Chain::Canton), other => Err(format!("unknown or unsupported chain {other}")), } } diff --git a/integration-tests/Cargo.toml b/integration-tests/Cargo.toml index 27980a60b..4bd875b8c 100644 --- a/integration-tests/Cargo.toml +++ b/integration-tests/Cargo.toml @@ -36,6 +36,7 @@ uuid = { version = "1.0", features = ["v4"] } sha3.workspace = true sha2 = "0.10" rlp = "0.5" +jsonwebtoken = { workspace = true } # solana dependencies anchor-client.workspace = true @@ -66,6 +67,7 @@ near-primitives = "0.26.0" near-workspaces = "0.15.0" # local chain-signatures dependencies +canton-types.workspace = true mpc-crypto.workspace = true mpc-contract.workspace = true mpc-keys.workspace = true diff --git a/integration-tests/fixtures/canton/daml-vault-0.0.1.dar b/integration-tests/fixtures/canton/daml-vault-0.0.1.dar new file mode 100644 index 000000000..9929f257d Binary files /dev/null and b/integration-tests/fixtures/canton/daml-vault-0.0.1.dar differ diff --git a/integration-tests/src/canton.rs b/integration-tests/src/canton.rs new file mode 100644 index 000000000..5a46ab9eb 
--- /dev/null +++ b/integration-tests/src/canton.rs @@ -0,0 +1,787 @@ +use anyhow::{Context as _, Result}; +use async_process::{Child, Command}; +use canton_types::ledger_api::{ + self, ActiveContractEntry, AllocatePartyRequest, AllocatePartyResponse, ContractEntry, + CreateUserRequest, DisclosedContract, EventFormat, GetActiveContractsRequest, JsCommands, + LedgerEndResponse, SubmitAndWaitForTransactionRequest, SubmitAndWaitForTransactionResponse, + UserInfo, +}; +use mpc_node::indexer_canton::CantonConfig; +use serde_json::{json, Value}; +use std::path::PathBuf; +use std::time::Duration; + +const CANTON_JSON_API_PORT: u16 = 7575; +const DEFAULT_DAR_RELATIVE_PATH: &str = "fixtures/canton/daml-vault-0.0.1.dar"; + +// --------------------------------------------------------------------------- +// JWT auth material generation +// --------------------------------------------------------------------------- + +pub struct JwtAuthMaterial { + pub private_key_pem: String, + pub key_path: PathBuf, + pub cert_path: PathBuf, + pub auth_conf_path: PathBuf, +} + +/// Generate P-256 private key + self-signed X.509 cert + HOCON auth config. 
+fn generate_jwt_auth_material() -> Result<JwtAuthMaterial> {
+    let tmp_dir = std::env::temp_dir();
+    let id = uuid::Uuid::new_v4();
+    let key_path = tmp_dir.join(format!("canton-jwt-{id}.key"));
+    let cert_path = tmp_dir.join(format!("canton-jwt-{id}.crt"));
+    let auth_conf_path = tmp_dir.join(format!("canton-auth-{id}.conf"));
+
+    let output = std::process::Command::new("openssl")
+        .args([
+            "req",
+            "-x509",
+            "-noenc",
+            "-days",
+            "3650",
+            "-newkey",
+            "ec",
+            "-pkeyopt",
+            "ec_paramgen_curve:prime256v1",
+            "-keyout",
+            &key_path.to_string_lossy(),
+            "-out",
+            &cert_path.to_string_lossy(),
+            "-subj",
+            "/CN=mpc-test-node",
+        ])
+        .output()
+        .context("openssl not found — needed to generate JWT cert")?;
+    anyhow::ensure!(output.status.success(), "openssl cert generation failed");
+
+    let private_key_pem = std::fs::read_to_string(&key_path)?;
+
+    // JWT auth on ledger-api only. The admin-api stays unauthenticated.
+    let conf = format!(
+        r#"canton.participants.sandbox.ledger-api {{
+  auth-services = [
+    {{ type = jwt-es-256-crt, certificate = "{}" }}
+  ]
+  jwt-timestamp-leeway.default = 10
+}}"#,
+        cert_path.to_string_lossy()
+    );
+    std::fs::write(&auth_conf_path, &conf)?;
+
+    Ok(JwtAuthMaterial {
+        private_key_pem,
+        key_path,
+        cert_path,
+        auth_conf_path,
+    })
+}
+
+// ---------------------------------------------------------------------------
+// CantonSandbox
+// ---------------------------------------------------------------------------
+
+/// A running Canton sandbox process with JWT auth and deployed Daml contracts.
+pub struct CantonSandbox { + process: Child, + jwt_key_path: PathBuf, + jwt_cert_path: PathBuf, + auth_conf_path: PathBuf, + pub json_api_url: String, + pub json_api_ws_url: String, + pub jwt_private_key_pem: String, + pub jwt_subject: String, + pub party_id: String, + pub operator_party: String, + pub requester_party: String, + pub signer_cid: String, + pub signer_template_id: String, + pub vault_cid: String, + pub vault_disclosure: Value, + pub signer_disclosure: Value, + pub nonce_cid: String, + pub client: CantonTestClient, +} + +impl CantonSandbox { + pub async fn run() -> Result { + // 0. Wait for ALL Canton ports to be free (previous sandbox may still be + // shutting down). Canton binds 7575 (JSON API), 6865 (gRPC), 6868 (sequencer). + for port in [CANTON_JSON_API_PORT, 6865, 6868] { + let mut released = false; + for i in 0..40 { + match tokio::net::TcpStream::connect(("127.0.0.1", port)).await { + Ok(_) => { + if i % 10 == 0 { + tracing::debug!("waiting for port {port} to be free (attempt {i})..."); + } + tokio::time::sleep(Duration::from_millis(500)).await; + } + Err(_) => { + released = true; + break; + } + } + } + anyhow::ensure!(released, "port {port} still in use after 20s — previous Canton did not exit"); + } + + // 1. Check dpm is available + let output = Command::new("dpm").arg("--version").output().await; + anyhow::ensure!( + output.is_ok() && output.unwrap().status.success(), + "dpm CLI not found or broken — install from https://docs.digitalasset.com" + ); + + // 2. Resolve DAR path (env var with fallback) + let dar_path = match std::env::var("CANTON_DAR_PATH") { + Ok(p) => PathBuf::from(p), + Err(_) => PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join(DEFAULT_DAR_RELATIVE_PATH), + }; + anyhow::ensure!(dar_path.exists(), "DAR not found at {}", dar_path.display()); + + // 3. Generate JWT key + cert + HOCON auth config. + let auth = generate_jwt_auth_material()?; + + // 4. Start dpm sandbox WITH auth but WITHOUT --dar. 
+ // `dpm sandbox --dar` uses the ledger-api gRPC for DAR upload, which + // fails with PERMISSION_DENIED when auth is enabled. Instead, we start + // without --dar, wait for readiness, then upload the DAR via the HTTP + // JSON API with a proper admin JWT. This is the pattern used by the + // official cn-quickstart. + let process = Command::new("dpm") + .arg("sandbox") + .arg("--json-api-port") + .arg(CANTON_JSON_API_PORT.to_string()) + .arg("-c") + .arg(&auth.auth_conf_path) + .spawn() + .context("failed to start dpm sandbox")?; + + let base_url = format!("http://127.0.0.1:{CANTON_JSON_API_PORT}"); + let ws_url = format!("ws://127.0.0.1:{CANTON_JSON_API_PORT}"); + + // 5. Wait for readiness (docs endpoint + synchronizer connected) + wait_for_canton_ready(&base_url, &auth.private_key_pem).await?; + + // 6. Upload DAR via HTTP API with admin JWT (two-phase bootstrap). + let admin_client = + CantonTestClient::new(&base_url, "participant_admin", auth.private_key_pem.clone()); + admin_client.upload_dar(&dar_path).await?; + + // 7. Setup parties, user, and contracts — all with JWT auth. + // Use participant_admin for bootstrap (party/user creation), + // then switch to the test user for contract operations. 
+ let user_id = format!("mpc-test-{}", uuid::Uuid::new_v4()); + let sig_network = admin_client.allocate_party_with_retry("SigNetwork").await?; + let operator = admin_client.allocate_party_with_retry("Operator").await?; + let requester = admin_client.allocate_party_with_retry("Requester").await?; + admin_client + .create_user(&user_id, &sig_network, &[&operator, &requester]) + .await?; + + let client = CantonTestClient::new(&base_url, &user_id, auth.private_key_pem.clone()); + + let signer_result = client + .create_contract( + &[&sig_network], + "#daml-signer:Signer:Signer", + json!({ "sigNetwork": &sig_network }), + ) + .await?; + let (signer_cid, signer_template_id) = find_created_contract(&signer_result, "Signer")?; + + let vault_id = "test-vault"; + // evmVaultAddress is all zeros — args[0] in test requests must match + let vault_result = client + .create_contract( + &[&operator], + "#daml-vault:Erc20Vault:Vault", + json!({ + "operators": [&operator], + "sigNetwork": &sig_network, + "evmVaultAddress": "0".repeat(64), + "evmMpcPublicKey": "", + "vaultId": vault_id, + }), + ) + .await?; + let (vault_cid, _) = find_created_contract(&vault_result, "Vault")?; + + // Fetch disclosed contracts (needed for requester to exercise Vault choices) + let vault_disclosure = client + .get_disclosed_contract(&[&operator], "#daml-vault:Erc20Vault:Vault", &vault_cid) + .await?; + let signer_disclosure = client + .get_disclosed_contract( + &[&sig_network], + "#daml-signer:Signer:Signer", + &signer_cid, + ) + .await?; + + // Issue initial SigningNonce for the requester + let nonce_result = client + .exercise_choice( + &[&requester], + &signer_template_id, + &signer_cid, + "IssueNonce", + json!({ "requester": &requester }), + Some(&[signer_disclosure.clone()]), + ) + .await?; + let nonce_cid = find_created_cid(&nonce_result, "SigningNonce")?; + + Ok(CantonSandbox { + process, + jwt_key_path: auth.key_path, + jwt_cert_path: auth.cert_path, + auth_conf_path: auth.auth_conf_path, + 
json_api_url: base_url, + json_api_ws_url: ws_url, + jwt_private_key_pem: auth.private_key_pem, + jwt_subject: user_id, + party_id: sig_network, + operator_party: operator, + requester_party: requester, + signer_cid, + signer_template_id, + vault_cid, + vault_disclosure, + signer_disclosure, + nonce_cid, + client, + }) + } + + /// Produce the CantonConfig for MPC node CLI args. + pub fn get_config(&self) -> CantonConfig { + CantonConfig { + json_api_url: self.json_api_url.clone(), + json_api_ws_url: self.json_api_ws_url.clone(), + jwt_private_key_path: self.jwt_key_path.to_string_lossy().to_string(), + jwt_subject: self.jwt_subject.clone(), + party_id: self.party_id.clone(), + signer_contract_id: self.signer_cid.clone(), + signer_template_id: self.signer_template_id.clone(), + } + } +} + +impl Drop for CantonSandbox { + fn drop(&mut self) { + // Kill the Canton process group. `dpm` spawns a Java child process that + // binds multiple ports (7575, 6865, 6868). Killing just the parent leaves + // the JVM alive. Use `pkill -P` to kill child processes, then the parent. + let pid = self.process.id(); + let _ = std::process::Command::new("pkill") + .args(["-9", "-P", &pid.to_string()]) + .output(); + let _ = std::process::Command::new("kill") + .args(["-9", &pid.to_string()]) + .output(); + tracing::info!("canton sandbox killed (pid {pid} + children), waiting for cleanup"); + // Wait until ALL Canton ports are fully released. + for port in [CANTON_JSON_API_PORT, 6865, 6868] { + for i in 0..40 { + match std::net::TcpStream::connect(("127.0.0.1", port)) { + Ok(_) => { + if i % 10 == 0 { + tracing::debug!("waiting for port {port} to be released..."); + } + std::thread::sleep(std::time::Duration::from_millis(500)); + } + Err(_) => break, + } + } + } + let _ = std::fs::remove_file(&self.jwt_key_path); + let _ = std::fs::remove_file(&self.jwt_cert_path); + let _ = std::fs::remove_file(&self.auth_conf_path); + } +} + +/// Wait for Canton to be fully ready. 
+/// Phase 1: /docs/openapi (unauthenticated) — confirms the process is listening. +/// Phase 2: Authenticated party allocation probe — confirms the synchronizer is +/// connected. With `alpha-dynamic.dars`, the synchronizer loads +/// asynchronously after the HTTP server starts. +async fn wait_for_canton_ready(base_url: &str, jwt_private_key_pem: &str) -> Result<()> { + let client = reqwest::Client::new(); + + // Phase 1: wait for the HTTP server to start + let docs_url = format!("{base_url}/docs/openapi"); + for attempt in 0..120 { + match client.get(&docs_url).send().await { + Ok(resp) if resp.status().is_success() => { + tracing::info!("canton docs endpoint ready after {attempt} attempts"); + break; + } + _ => tokio::time::sleep(Duration::from_millis(500)).await, + } + if attempt == 119 { + anyhow::bail!("canton sandbox did not become ready within 60 seconds"); + } + } + + // Phase 2: wait for the synchronizer to be connected using an authenticated + // party-allocation probe. Uses `participant_admin` JWT to bypass user checks. + let probe_client = CantonTestClient::new(base_url, "participant_admin", jwt_private_key_pem.to_string()); + let api_url = format!("{base_url}/v2/parties"); + for attempt in 0..120 { + match probe_client + .auth_post(&api_url)? 
+ .json(&serde_json::json!({ + "partyIdHint": "_readiness_probe", + "identityProviderId": "", + "synchronizerId": "", + "userId": "" + })) + .send() + .await + { + Ok(resp) => { + let status = resp.status().as_u16(); + if status == 200 || status == 409 { + // 200 = party created (synchronizer up) + // 409 = party already exists (synchronizer up) + tracing::info!( + "canton synchronizer ready after {attempt} additional attempts (status: {status})" + ); + return Ok(()); + } + if status == 400 { + let body = resp.text().await.unwrap_or_default(); + if body.contains("WITHOUT_CONNECTED_SYNCHRONIZER") { + if attempt % 10 == 0 { + tracing::debug!("waiting for canton synchronizer (attempt {attempt})..."); + } + tokio::time::sleep(Duration::from_millis(500)).await; + continue; + } + // Other 400 = API is ready, request issue + tracing::info!( + "canton synchronizer ready after {attempt} additional attempts (status: 400)" + ); + return Ok(()); + } + tokio::time::sleep(Duration::from_millis(500)).await; + } + _ => tokio::time::sleep(Duration::from_millis(500)).await, + } + } + anyhow::bail!("canton synchronizer did not become ready within 60 seconds") +} + +// --------------------------------------------------------------------------- +// CantonTestClient with JWT ES256 auth +// --------------------------------------------------------------------------- + +#[derive(Clone)] +pub struct CantonTestClient { + http: reqwest::Client, + base_url: String, + user_id: String, + jwt_private_key_pem: String, +} + +impl CantonTestClient { + pub fn new(base_url: &str, user_id: &str, jwt_private_key_pem: String) -> Self { + Self { + http: reqwest::Client::new(), + base_url: base_url.to_string(), + user_id: user_id.to_string(), + jwt_private_key_pem, + } + } + + fn generate_jwt(&self) -> Result { + use jsonwebtoken::{encode, Algorithm, EncodingKey, Header}; + let now = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH)? 
+ .as_secs(); + #[derive(serde::Serialize)] + struct Claims { + sub: String, + scope: String, + iat: u64, + exp: u64, + } + let claims = Claims { + sub: self.user_id.clone(), + scope: "daml_ledger_api".to_string(), + iat: now, + exp: now + 30, + }; + let key = EncodingKey::from_ec_pem(self.jwt_private_key_pem.as_bytes())?; + Ok(encode(&Header::new(Algorithm::ES256), &claims, &key)?) + } + + fn auth_post(&self, url: &str) -> Result { + Ok(self.http.post(url).bearer_auth(self.generate_jwt()?)) + } + + fn auth_get(&self, url: &str) -> Result { + Ok(self.http.get(url).bearer_auth(self.generate_jwt()?)) + } + + /// Upload a DAR file via the JSON API (POST /v2/packages). + /// Requires admin JWT (sub: "participant_admin"). + pub async fn upload_dar(&self, dar_path: &std::path::Path) -> Result<()> { + let dar_bytes = std::fs::read(dar_path) + .context(format!("failed to read DAR at {}", dar_path.display()))?; + for attempt in 0..30 { + let resp = self + .http + .post(format!("{}/v2/packages", self.base_url)) + .bearer_auth(self.generate_jwt()?) + .header("Content-Type", "application/octet-stream") + .body(dar_bytes.clone()) + .send() + .await?; + if resp.status().is_success() { + tracing::info!("DAR uploaded successfully"); + return Ok(()); + } + let status = resp.status(); + let body = resp.text().await.unwrap_or_default(); + // Retry on synchronizer not ready + if body.contains("WITHOUT_CONNECTED_SYNCHRONIZER") + || body.contains("PACKAGE_SERVICE_CANNOT_AUTODETECT") + { + if attempt % 5 == 0 { + tracing::debug!("retrying DAR upload (attempt {attempt}): {status}"); + } + tokio::time::sleep(Duration::from_secs(2)).await; + continue; + } + anyhow::bail!("DAR upload failed: HTTP {status} — {body}"); + } + anyhow::bail!("DAR upload failed after 30 retries") + } + + /// Allocate a party, retrying if the synchronizer is still connecting. 
+    pub async fn allocate_party_with_retry(&self, hint: &str) -> Result<String> {
+        for attempt in 0..30 {
+            match self.allocate_party(hint).await {
+                Ok(party) => return Ok(party),
+                Err(e) => {
+                    let msg = e.to_string();
+                    if msg.contains("WITHOUT_CONNECTED_SYNCHRONIZER") {
+                        if attempt % 5 == 0 {
+                            tracing::debug!("retrying allocate_party({hint}), synchronizer not ready (attempt {attempt})");
+                        }
+                        tokio::time::sleep(Duration::from_secs(1)).await;
+                        continue;
+                    }
+                    return Err(e);
+                }
+            }
+        }
+        anyhow::bail!("allocate_party({hint}) failed after 30 retries — synchronizer never connected")
+    }
+
+    pub async fn allocate_party(&self, hint: &str) -> Result<String> {
+        let req = AllocatePartyRequest {
+            party_id_hint: hint.to_string(),
+            identity_provider_id: None,
+            synchronizer_id: None,
+            user_id: None,
+        };
+        let resp = self
+            .auth_post(&format!("{}/v2/parties", self.base_url))?
+            .json(&req)
+            .send()
+            .await?;
+        if !resp.status().is_success() {
+            let status = resp.status();
+            let body = resp.text().await.unwrap_or_default();
+            anyhow::bail!("allocate_party({hint}) failed: HTTP {status} — {body}");
+        }
+        let body: AllocatePartyResponse = resp.json().await?;
+        Ok(body.party_details.party)
+    }
+
+    pub async fn create_user(
+        &self,
+        user_id: &str,
+        primary_party: &str,
+        additional: &[&str],
+    ) -> Result<()> {
+        let mut rights = Vec::new();
+        for party in std::iter::once(&primary_party).chain(additional.iter()) {
+            rights.push(ledger_api::can_act_as(party));
+            rights.push(ledger_api::can_read_as(party));
+        }
+        let req = CreateUserRequest {
+            user: UserInfo {
+                id: user_id.to_string(),
+                primary_party: primary_party.to_string(),
+                is_deactivated: false,
+                identity_provider_id: String::new(),
+            },
+            rights,
+        };
+        let resp = self
+            .auth_post(&format!("{}/v2/users", self.base_url))?
+ .json(&req) + .send() + .await?; + if !resp.status().is_success() { + let status = resp.status(); + let body = resp.text().await.unwrap_or_default(); + anyhow::bail!("create_user({user_id}) failed: HTTP {status} — {body}"); + } + Ok(()) + } + + pub async fn create_contract( + &self, + act_as: &[&str], + template_id: &str, + args: Value, + ) -> Result { + let parties: Vec = act_as.iter().map(|s| s.to_string()).collect(); + // Retry on transient errors (package vetting, synchronizer connectivity). + for attempt in 0..30 { + let req = SubmitAndWaitForTransactionRequest { + commands: JsCommands { + command_id: uuid::Uuid::new_v4().to_string(), + user_id: self.user_id.clone(), + act_as: parties.clone(), + read_as: parties.clone(), + commands: vec![ledger_api::Command::CreateCommand { + template_id: template_id.to_string(), + create_arguments: args.clone(), + }], + disclosed_contracts: vec![], + }, + }; + let resp = self + .auth_post(&format!( + "{}/v2/commands/submit-and-wait-for-transaction", + self.base_url + ))? 
+ .json(&req) + .send() + .await?; + if resp.status().is_success() { + return Ok(resp.json().await?); + } + let status = resp.status(); + let body = resp.text().await.unwrap_or_default(); + let code = status.as_u16(); + if (code == 400 || code == 404) + && (body.contains("PACKAGE_SELECTION_FAILED") + || body.contains("PACKAGE_NAMES_NOT_FOUND") + || body.contains("TEMPLATES_OR_INTERFACES_NOT_FOUND") + || body.contains("WITHOUT_CONNECTED_SYNCHRONIZER")) + { + if attempt % 5 == 0 { + tracing::debug!("create_contract({template_id}) retrying: {status} (attempt {attempt})"); + } + tokio::time::sleep(Duration::from_secs(2)).await; + continue; + } + anyhow::bail!("create_contract({template_id}) failed: HTTP {status} — {body}"); + } + anyhow::bail!("create_contract({template_id}) failed after 30 retries") + } + + pub async fn exercise_choice( + &self, + act_as: &[&str], + template_id: &str, + contract_id: &str, + choice: &str, + choice_argument: Value, + disclosed_contracts: Option<&[Value]>, + ) -> Result { + let parties: Vec = act_as.iter().map(|s| s.to_string()).collect(); + let disclosed: Vec = disclosed_contracts + .unwrap_or(&[]) + .iter() + .map(|v| serde_json::from_value(v.clone())) + .collect::, _>>() + .context("invalid DisclosedContract JSON")?; + let req = SubmitAndWaitForTransactionRequest { + commands: JsCommands { + command_id: uuid::Uuid::new_v4().to_string(), + user_id: self.user_id.clone(), + act_as: parties.clone(), + read_as: parties, + commands: vec![ledger_api::Command::ExerciseCommand { + template_id: template_id.to_string(), + contract_id: contract_id.to_string(), + choice: choice.to_string(), + choice_argument, + }], + disclosed_contracts: disclosed, + }, + }; + let resp = self + .auth_post(&format!( + "{}/v2/commands/submit-and-wait-for-transaction", + self.base_url + ))? + .json(&req) + .send() + .await? + .error_for_status()?; + Ok(resp.json().await?) + } + + /// Fetch a disclosed contract blob for cross-party visibility. 
+ pub async fn get_disclosed_contract( + &self, + parties: &[&str], + template_id: &str, + contract_id: &str, + ) -> Result { + let end: LedgerEndResponse = self + .auth_get(&format!("{}/v2/state/ledger-end", self.base_url))? + .send() + .await? + .error_for_status()? + .json() + .await?; + + let mut filters = serde_json::Map::new(); + for party in parties { + filters.insert( + party.to_string(), + json!({ + "cumulative": [{ "identifierFilter": { "TemplateFilter": { "value": { + "templateId": template_id, "includeCreatedEventBlob": true + }}}}] + }), + ); + } + let req = GetActiveContractsRequest { + active_at_offset: end.offset, + event_format: EventFormat { + filters_by_party: filters, + verbose: true, + }, + }; + let resp: Vec = self + .auth_post(&format!("{}/v2/state/active-contracts", self.base_url))? + .json(&req) + .send() + .await? + .error_for_status()? + .json() + .await?; + + for entry in &resp { + if let Some(ContractEntry::JsActiveContract(ac)) = &entry.contract_entry { + if ac.created_event.contract_id == contract_id { + return Ok(json!({ + "templateId": ac.created_event.template_id, + "contractId": ac.created_event.contract_id, + "createdEventBlob": ac.created_event.created_event_blob, + "synchronizerId": ac.synchronizer_id, + })); + } + } + } + anyhow::bail!("disclosed contract not found for {contract_id}") + } + + pub async fn get_active_contracts( + &self, + parties: &[&str], + template_id: &str, + ) -> Result> { + let end: LedgerEndResponse = self + .auth_get(&format!("{}/v2/state/ledger-end", self.base_url))? + .send() + .await? + .error_for_status()? 
+ .json() + .await?; + + let mut filters = serde_json::Map::new(); + for party in parties { + filters.insert( + party.to_string(), + json!({ + "cumulative": [{ "identifierFilter": { "TemplateFilter": { "value": { + "templateId": template_id, "includeCreatedEventBlob": false + }}}}] + }), + ); + } + let req = GetActiveContractsRequest { + active_at_offset: end.offset, + event_format: EventFormat { + filters_by_party: filters, + verbose: true, + }, + }; + let resp: Vec = self + .auth_post(&format!("{}/v2/state/active-contracts", self.base_url))? + .json(&req) + .send() + .await? + .error_for_status()? + .json() + .await?; + Ok(resp) + } + + pub async fn poll_for_contract( + &self, + parties: &[&str], + template_id: &str, + predicate: impl Fn(&Value) -> bool, + timeout: Duration, + ) -> Result { + let start = std::time::Instant::now(); + loop { + if start.elapsed() > timeout { + anyhow::bail!("timeout waiting for {template_id} after {timeout:?}"); + } + let contracts = self.get_active_contracts(parties, template_id).await?; + for item in &contracts { + if let Some(ac) = item + .get("contractEntry") + .and_then(|e| e.get("JsActiveContract")) + { + let payload = ac["createdEvent"] + .get("payload") + .or_else(|| ac["createdEvent"].get("createArgument")) + .unwrap_or(&ac["createdEvent"]); + if predicate(payload) { + return Ok(ac.clone()); + } + } + } + tokio::time::sleep(Duration::from_secs(3)).await; + } + } +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +fn find_created_contract(result: &Value, suffix: &str) -> Result<(String, String)> { + let resp: SubmitAndWaitForTransactionResponse = + serde_json::from_value(result.clone()).context("failed to parse transaction response")?; + for event in &resp.transaction.events { + if let ledger_api::Event::CreatedEvent(created) = event { + if ledger_api::template_suffix_matches(&created.template_id, 
suffix) {
+                return Ok((created.contract_id.clone(), created.template_id.clone()));
+            }
+        }
+    }
+    anyhow::bail!("no CreatedEvent for {suffix}")
+}
+
+/// Extract contract ID from a transaction result. Public for use in test modules.
+pub fn find_created_cid(result: &Value, suffix: &str) -> Result<String> {
+    find_created_contract(result, suffix).map(|(cid, _)| cid)
+}
diff --git a/integration-tests/src/cluster/mod.rs b/integration-tests/src/cluster/mod.rs
index 21a45a1b7..57ca2f4af 100644
--- a/integration-tests/src/cluster/mod.rs
+++ b/integration-tests/src/cluster/mod.rs
@@ -39,6 +39,7 @@ pub struct Cluster {
     pub nodes: Nodes,
     pub account_idx: usize,
     pub solana: Option,
+    pub canton: Option<crate::canton::CantonSandbox>,
 }
 
 impl Cluster {
diff --git a/integration-tests/src/cluster/spawner.rs b/integration-tests/src/cluster/spawner.rs
index a84f4e78a..07f11384c 100644
--- a/integration-tests/src/cluster/spawner.rs
+++ b/integration-tests/src/cluster/spawner.rs
@@ -123,6 +123,8 @@ pub struct ClusterSpawner {
     pub redis: Option,
     pub worker: Option>,
     pub solana: Option,
+    pub canton: Option<crate::canton::CantonSandbox>,
+    pub use_canton: bool,
     pub program_address: Option,
     prestockpile: Option,
     pub pregenerated_keys: PregeneratedKeys,
@@ -157,6 +159,8 @@ impl Default for ClusterSpawner {
         redis: None,
         worker: None,
         solana: None,
+        canton: None,
+        use_canton: false,
         program_address: None,
         prestockpile: Some(Prestockpile { multiplier: 4 }),
         pregenerated_keys: PregeneratedKeys::load(nodes).unwrap(),
@@ -290,6 +294,11 @@ impl ClusterSpawner {
         self
     }
 
+    pub fn canton(mut self) -> Self {
+        self.use_canton = true;
+        self
+    }
+
     pub fn debug_node(&mut self) -> &mut Self {
         self.release = false;
         self
@@ -412,6 +421,14 @@ impl IntoFuture for ClusterSpawner {
             self.solana = Some(solana);
         }
 
+        // Canton setup (follows Solana pattern — started before self.run(),
+        // stored on spawner, moved to Cluster via .take())
+        if self.use_canton && self.canton.is_none() {
+            let sandbox = crate::canton::CantonSandbox::run().await?;
+            self.cfg.canton =
Some(sandbox.get_config()); + self.canton = Some(sandbox); + } + let nodes = self.run().await?; let connector = near_jsonrpc_client::JsonRpcClient::new_client(); let jsonrpc_client = connector.connect(nodes.ctx().worker.rpc_addr()); @@ -424,6 +441,7 @@ impl IntoFuture for ClusterSpawner { docker_client: self.docker, account_idx: nodes.len(), solana: self.solana.take(), + canton: self.canton.take(), nodes, }; diff --git a/integration-tests/src/containers.rs b/integration-tests/src/containers.rs index 240785b78..1eca6eefb 100644 --- a/integration-tests/src/containers.rs +++ b/integration-tests/src/containers.rs @@ -127,6 +127,7 @@ impl Node { eth: eth_args, sol: sol_args, hydration: hydration_args, + canton: mpc_node::indexer_canton::CantonArgs::from_config(config.cfg.canton.clone()), my_address: None, storage_options: ctx.storage_options.clone(), log_options: ctx.log_options.clone(), diff --git a/integration-tests/src/lib.rs b/integration-tests/src/lib.rs index 4b8efce2a..a82ab53e1 100644 --- a/integration-tests/src/lib.rs +++ b/integration-tests/src/lib.rs @@ -1,4 +1,5 @@ pub mod actions; +pub mod canton; pub mod cluster; pub mod containers; pub mod eth; @@ -63,6 +64,7 @@ pub struct NodeConfig { pub eth: Option, pub sol: Option, pub hydration: Option, + pub canton: Option, } impl Default for NodeConfig { @@ -88,6 +90,7 @@ impl Default for NodeConfig { eth: None, sol: None, hydration: None, + canton: None, } } } diff --git a/integration-tests/src/local.rs b/integration-tests/src/local.rs index b486cad24..ccdda1233 100644 --- a/integration-tests/src/local.rs +++ b/integration-tests/src/local.rs @@ -84,6 +84,7 @@ impl Node { eth, sol, hydration, + canton: mpc_node::indexer_canton::CantonArgs::from_config(cfg.canton.clone()), indexer_options, my_address: None, storage_options: ctx.storage_options.clone(), @@ -183,6 +184,7 @@ impl Node { eth, sol, hydration, + canton: mpc_node::indexer_canton::CantonArgs::from_config(config.cfg.canton.clone()), indexer_options, 
my_address: None, storage_options: ctx.storage_options.clone(), diff --git a/integration-tests/tests/cases/canton.rs b/integration-tests/tests/cases/canton.rs new file mode 100644 index 000000000..1d18321c1 --- /dev/null +++ b/integration-tests/tests/cases/canton.rs @@ -0,0 +1,163 @@ +use anyhow::{Context as _, Result}; +use integration_tests::cluster; +use mpc_primitives::LATEST_MPC_KEY_VERSION; +use serde_json::json; +use std::time::Duration; +use test_log::test; + +#[ignore] // requires dpm + openssl + Docker (for Ethereum) +#[test(tokio::test)] +async fn test_canton_eth_bidirectional_flow() -> Result<()> { + // 1. Spawn cluster with Canton + Ethereum + let nodes = cluster::spawn() + .disable_prestockpile() + .canton() + .ethereum() + .await?; + + nodes.wait().signable().await?; + + // 2. Get Canton and Ethereum contexts + let canton = nodes + .canton + .as_ref() + .context("canton sandbox not available")?; + let _eth_ctx = nodes + .nodes + .ctx() + .ethereum + .as_ref() + .context("ethereum not available")?; + + // 3. 
Submit sign request via Vault (nonce-based flow) + let client = &canton.client; + let vault_template = "#daml-vault:Erc20Vault:Vault"; + + let evm_tx_params = json!({ + "to": "a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48", + "functionSignature": "transfer(address,uint256)", + "args": [ + "0".repeat(64), + "0000000000000000000000000000000000000000000000000000000005f5e100" + ], + "value": "0".repeat(64), + "nonce": format!("{:0>64}", "1"), + "gasLimit": format!("{:0>64}", "186a0"), + "maxFeePerGas": format!("{:0>64}", "3b9aca00"), + "maxPriorityFee": format!("{:0>64}", "3b9aca00"), + "chainId": format!("{:0>64}", "aa36a7"), + }); + + let deposit_result = client + .exercise_choice( + &[&canton.requester_party], + vault_template, + &canton.vault_cid, + "RequestDeposit", + json!({ + "requester": &canton.requester_party, + "signerCid": &canton.signer_cid, + "path": &canton.requester_party, + "evmTxParams": evm_tx_params, + "nonceCid": &canton.nonce_cid, + "nonceCidText": &canton.nonce_cid, + "keyVersion": LATEST_MPC_KEY_VERSION, + "algo": "ECDSA", + "dest": "ethereum", + "params": "", + "outputDeserializationSchema": r#"[{"name":"","type":"bool"}]"#, + "respondSerializationSchema": r#"[{"name":"","type":"bool"}]"#, + }), + Some(&[ + canton.vault_disclosure.clone(), + canton.signer_disclosure.clone(), + ]), + ) + .await?; + + // 4. Extract requestId from PendingDeposit + let events = deposit_result["transaction"]["events"] + .as_array() + .context("no events")?; + let mut request_id = String::new(); + for event in events { + if let Some(created) = event.get("CreatedEvent") { + if created["templateId"] + .as_str() + .unwrap_or("") + .contains("PendingDeposit") + { + let payload = created + .get("payload") + .or_else(|| created.get("createArgument")) + .context("no payload")?; + request_id = payload["requestId"] + .as_str() + .context("no requestId")? 
+ .to_string(); + break; + } + } + } + anyhow::ensure!( + !request_id.is_empty(), + "no requestId found in deposit result" + ); + tracing::info!(%request_id, "canton deposit request submitted"); + + // 5. Poll for SignatureRespondedEvent matching the requestId + let sig_event = client + .poll_for_contract( + &[&canton.party_id], + "#daml-vault:Signer:SignatureRespondedEvent", + |payload| payload["requestId"].as_str() == Some(&request_id), + Duration::from_secs(120), + ) + .await + .context("timeout waiting for SignatureRespondedEvent")?; + + tracing::info!("received SignatureRespondedEvent"); + + // 6. Verify the signature exists + let sig_payload = sig_event["createdEvent"] + .get("payload") + .or_else(|| sig_event["createdEvent"].get("createArgument")) + .context("no payload in SignatureRespondedEvent")?; + let signature_hex = sig_payload["signature"] + .as_str() + .context("missing signature field")?; + assert!(!signature_hex.is_empty(), "signature is empty"); + + // 7. Poll for RespondBidirectionalEvent (MPC posted the outcome) + let respond_event = client + .poll_for_contract( + &[&canton.party_id], + "#daml-vault:Signer:RespondBidirectionalEvent", + |payload| payload["requestId"].as_str() == Some(&request_id), + Duration::from_secs(120), + ) + .await + .context("timeout waiting for RespondBidirectionalEvent")?; + + tracing::info!("received RespondBidirectionalEvent"); + + // 8. 
Verify the respond event has the same requestId + let respond_payload = respond_event["createdEvent"] + .get("payload") + .or_else(|| respond_event["createdEvent"].get("createArgument")) + .context("no payload in RespondBidirectionalEvent")?; + assert_eq!( + respond_payload["requestId"].as_str(), + Some(request_id.as_str()), + "RespondBidirectionalEvent requestId mismatch" + ); + + // Verify the respond event has serializedOutput + assert!( + respond_payload.get("serializedOutput").is_some(), + "RespondBidirectionalEvent missing serializedOutput" + ); + + tracing::info!("Canton bidirectional flow completed successfully"); + Ok(()) +} diff --git a/integration-tests/tests/cases/canton_stream.rs b/integration-tests/tests/cases/canton_stream.rs new file mode 100644 index 000000000..0c8310a22 --- /dev/null +++ b/integration-tests/tests/cases/canton_stream.rs @@ -0,0 +1,402 @@ +use anyhow::{Context as _, Result}; +use integration_tests::canton::CantonSandbox; +use mpc_node::backlog::Backlog; +use mpc_node::indexer_canton::CantonStream; +use mpc_node::protocol::Chain; +use mpc_node::stream::{ChainEvent, ChainStream}; +use mpc_node::protocol::IndexedSignRequest; +use mpc_primitives::LATEST_MPC_KEY_VERSION; +use serde_json::json; +use std::collections::HashSet; +use std::time::Duration; +use test_log::test; +use tokio::time::timeout; + +/// Start a Canton sandbox with deployed contracts (no MPC cluster). +async fn canton_sandbox() -> Result { + CantonSandbox::run().await +} + +/// Create a CantonStream from the sandbox config with an externally-provided Backlog. +/// Accepts Backlog as parameter (needed for checkpoint tests). +async fn stream_canton(sandbox: &CantonSandbox, backlog: Backlog) -> Result { + let config = sandbox.get_config(); + let mut stream = + CantonStream::new(Some(config), backlog).context("failed to create CantonStream")?; + ChainStream::start(&mut stream).await; + Ok(stream) +} + +/// Submit a sign request through the Vault contract. 
+/// Uses the nonce-based flow: RequestDeposit with pre-issued SigningNonce. +/// The Vault internally creates a SignRequest, exercises Signer.SignBidirectional +/// (which archives the nonce and creates SignBidirectionalEvent + new nonce). +/// Updates sandbox.nonce_cid with the fresh nonce for the next call. +/// Returns the requestId from the PendingDeposit event. +async fn submit_canton_sign_request(sandbox: &mut CantonSandbox) -> Result { + let client = &sandbox.client; + let vault_template = "#daml-vault:Erc20Vault:Vault"; + + // args[0] MUST match evmVaultAddress ("0".repeat(64)) — Daml asserts this + let evm_tx_params = json!({ + "to": "a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48", + "functionSignature": "transfer(address,uint256)", + "args": [ + "0".repeat(64), + "0000000000000000000000000000000000000000000000000000000005f5e100" + ], + "value": "0".repeat(64), + "nonce": format!("{:0>64}", "1"), + "gasLimit": format!("{:0>64}", "186a0"), + "maxFeePerGas": format!("{:0>64}", "3b9aca00"), + "maxPriorityFee": format!("{:0>64}", "3b9aca00"), + "chainId": format!("{:0>64}", "aa36a7"), + }); + + // RequestDeposit — nonceCid is the pre-issued SigningNonce + let deposit_result = client + .exercise_choice( + &[&sandbox.requester_party], + vault_template, + &sandbox.vault_cid, + "RequestDeposit", + json!({ + "requester": &sandbox.requester_party, + "signerCid": &sandbox.signer_cid, + "path": &sandbox.requester_party, + "evmTxParams": evm_tx_params, + "nonceCid": &sandbox.nonce_cid, + "nonceCidText": &sandbox.nonce_cid, + "keyVersion": LATEST_MPC_KEY_VERSION, + "algo": "ECDSA", + "dest": "ethereum", + "params": "", + "outputDeserializationSchema": r#"[{"name":"","type":"bool"}]"#, + "respondSerializationSchema": r#"[{"name":"","type":"bool"}]"#, + }), + Some(&[ + sandbox.vault_disclosure.clone(), + sandbox.signer_disclosure.clone(), + ]), + ) + .await?; + + // Extract requestId from PendingDeposit and update nonce_cid from new SigningNonce + let events = 
deposit_result["transaction"]["events"] + .as_array() + .context("no events")?; + let mut request_id = None; + for event in events { + if let Some(created) = event.get("CreatedEvent") { + let tid = created["templateId"].as_str().unwrap_or(""); + if tid.contains("PendingDeposit") { + let payload = created + .get("payload") + .or_else(|| created.get("createArgument")) + .context("no payload")?; + request_id = Some( + payload["requestId"] + .as_str() + .map(|s| s.to_string()) + .context("no requestId")?, + ); + } + // SignBidirectional creates a fresh SigningNonce — update for next call + if tid.contains("SigningNonce") { + sandbox.nonce_cid = created["contractId"] + .as_str() + .context("no contractId on new SigningNonce")? + .to_string(); + } + } + } + request_id.context("no PendingDeposit in RequestDeposit result") +} + +/// Poll stream for a SignRequest event with timeout. +async fn wait_for_sign_request( + stream: &mut CantonStream, + timeout_secs: u64, +) -> Result { + timeout(Duration::from_secs(timeout_secs), async { + loop { + match stream.next_event().await { + Some(ChainEvent::SignRequest(req)) => return Ok(req), + Some(ChainEvent::Block(_)) => continue, + Some(_) => continue, + None => tokio::time::sleep(Duration::from_millis(100)).await, + } + } + }) + .await + .context("timeout waiting for SignRequest")? 
+} + +#[ignore] // requires dpm +#[test(tokio::test)] +async fn test_canton_stream_parse_sign_event() -> Result<()> { + let mut sandbox = canton_sandbox().await?; + let backlog = Backlog::new(); + let mut stream = stream_canton(&sandbox, backlog).await?; + + let _request_id = submit_canton_sign_request(&mut sandbox).await?; + + let event = wait_for_sign_request(&mut stream, 30).await?; + + assert_eq!(event.chain, Chain::Canton); + assert_eq!(event.args.key_version, LATEST_MPC_KEY_VERSION); + // Canton only supports bidirectional — verify the kind + assert!( + matches!( + event.kind, + mpc_node::protocol::SignKind::SignBidirectional(_) + ), + "expected SignBidirectional, got {:?}", + event.kind + ); + Ok(()) +} + +#[ignore] +#[test(tokio::test)] +async fn test_canton_stream_emits_blocks() -> Result<()> { + let mut sandbox = canton_sandbox().await?; + let backlog = Backlog::new(); + let mut stream = stream_canton(&sandbox, backlog).await?; + + // Submit a request to generate ledger activity + let _ = submit_canton_sign_request(&mut sandbox).await?; + + let mut saw_block = false; + for _ in 0..10 { + match timeout(Duration::from_secs(5), stream.next_event()).await { + Ok(Some(ChainEvent::Block(_))) => { + saw_block = true; + break; + } + Ok(Some(_)) => continue, + Ok(None) => { + anyhow::bail!("stream returned None unexpectedly"); + } + Err(_) => break, // timeout + } + } + assert!( + saw_block, + "expected at least one Block event from Canton stream" + ); + Ok(()) +} + +#[ignore] +#[test(tokio::test)] +async fn test_canton_stream_concurrent_events() -> Result<()> { + let mut sandbox = canton_sandbox().await?; + let backlog = Backlog::new(); + let mut stream = stream_canton(&sandbox, backlog).await?; + + // Submit 3 sign requests (each needs its own auth cycle) + let mut expected_request_ids = HashSet::new(); + for _ in 0..3 { + let rid = submit_canton_sign_request(&mut sandbox).await?; + expected_request_ids.insert(rid); + } + + // Collect SignRequest events until we 
have all 3 + let mut received = Vec::new(); + for _ in 0..20 { + match timeout(Duration::from_secs(5), stream.next_event()).await { + Ok(Some(ChainEvent::SignRequest(req))) => { + received.push(req); + if received.len() >= 3 { + break; + } + } + Ok(Some(_)) => continue, + Ok(None) => anyhow::bail!("stream closed"), + Err(_) => break, + } + } + + assert_eq!( + received.len(), + 3, + "expected 3 SignRequest events, got {}", + received.len() + ); + + // Verify received IDs are distinct (no duplicate replays) + let received_ids: HashSet<_> = received + .iter() + .map(|r| hex::encode(r.id.request_id)) + .collect(); + assert_eq!( + received_ids.len(), + 3, + "expected 3 distinct request IDs, got {}", + received_ids.len() + ); + Ok(()) +} + +#[ignore] +#[test(tokio::test)] +async fn test_canton_stream_catchup_linear() -> Result<()> { + let mut sandbox = canton_sandbox().await?; + + // Phase 1: stream1 sees events + let backlog1 = Backlog::new(); + let mut stream1 = stream_canton(&sandbox, backlog1).await?; + + let _ = submit_canton_sign_request(&mut sandbox).await?; + + let mut seen_by_stream1 = 0; + let mut last_block_stream1: u64 = 0; + for _ in 0..10 { + match timeout(Duration::from_millis(500), stream1.next_event()).await { + Ok(Some(ChainEvent::SignRequest(_))) => seen_by_stream1 += 1, + Ok(Some(ChainEvent::Block(b))) => { + if b > last_block_stream1 { + last_block_stream1 = b; + } + } + Ok(Some(_)) => {} + _ => break, + } + } + assert!(seen_by_stream1 > 0, "stream1 saw no events"); + assert!(last_block_stream1 > 0, "stream1 saw no blocks"); + + // Drop stream1 + drop(stream1); + + // Phase 2: stream2 should catch up and see new events + let backlog2 = Backlog::new(); + let mut stream2 = stream_canton(&sandbox, backlog2).await?; + + let _ = submit_canton_sign_request(&mut sandbox).await?; + + let mut caught_up = false; + let mut seen_sign_events = false; + for _ in 0..20 { + match timeout(Duration::from_secs(1), stream2.next_event()).await { + 
Ok(Some(ChainEvent::Block(b))) if b >= last_block_stream1 => caught_up = true, + Ok(Some(ChainEvent::SignRequest(_))) => seen_sign_events = true, + Ok(Some(_)) => {} + _ => break, + } + if caught_up && seen_sign_events { + break; + } + } + assert!( + caught_up, + "stream2 did not catch up to stream1's block height" + ); + assert!(seen_sign_events, "stream2 saw no SignRequest events"); + Ok(()) +} + +#[ignore] +#[test(tokio::test)] +async fn test_canton_stream_checkpoint_persistence() -> Result<()> { + let mut sandbox = canton_sandbox().await?; + + // Phase 1: create stream, submit event, set a checkpoint on the first Block + let backlog1 = Backlog::new(); + let mut stream1 = stream_canton(&sandbox, backlog1.clone()).await?; + + let _ = submit_canton_sign_request(&mut sandbox).await?; + + let mut checkpoint_block = None; + for _ in 0..10 { + match timeout(Duration::from_secs(1), stream1.next_event()).await { + Ok(Some(ChainEvent::Block(block))) => { + backlog1.set_processed_block(Chain::Canton, block).await; + checkpoint_block = Some(block); + break; + } + Ok(Some(_)) => continue, + _ => break, + } + } + assert!(checkpoint_block.is_some(), "no Block event to checkpoint"); + drop(stream1); + + // Phase 2: a second stream should still see new events after stream1 is dropped. NOTE(review): backlog2 is a fresh Backlog::new(), so the checkpoint stored in backlog1 is never read — pass backlog1 (or restore its state) to actually exercise checkpoint persistence; confirm intent + let backlog2 = Backlog::new(); + let mut stream2 = stream_canton(&sandbox, backlog2).await?; + + let _ = submit_canton_sign_request(&mut sandbox).await?; + + let event = timeout(Duration::from_secs(10), async { + loop { + if let Some(ev) = stream2.next_event().await { + return ev; + } + } + }) + .await + .context("timeout waiting for event on stream2")?; + + assert!( + matches!(event, ChainEvent::SignRequest(_) | ChainEvent::Block(_)), + "expected SignRequest or Block, got {:?}", + event + ); + Ok(()) +} + +#[ignore] +#[test(tokio::test)] +async fn test_canton_stream_sign_and_respond_flow() -> Result<()> { + let mut sandbox = canton_sandbox().await?; + let backlog = Backlog::new(); + let mut stream = 
stream_canton(&sandbox, backlog).await?; + + // Submit a sign request and capture the request ID + let _request_id = submit_canton_sign_request(&mut sandbox).await?; + + // Wait for the SignRequest event from the stream + let sign_event = wait_for_sign_request(&mut stream, 30).await?; + assert_eq!(sign_event.chain, Chain::Canton); + + // Exercise Signer.Respond directly (no MPC cluster — we mock the response). + // DER signature with valid secp256k1 scalars (r=1, s=1) — not cryptographically + // meaningful but parseable by k256::ecdsa::Signature::from_der. + let dummy_der_sig = "3006020101020101"; + + sandbox + .client + .exercise_choice( + &[&sandbox.party_id], + &sandbox.signer_template_id, + &sandbox.signer_cid, + "Respond", + json!({ + "operators": [&sandbox.operator_party], + "requester": &sandbox.requester_party, + "requestId": &_request_id, + "signature": dummy_der_sig, + }), + None, + ) + .await?; + + // Poll for Respond event from the stream + let mut saw_respond = false; + for _ in 0..10 { + match timeout(Duration::from_secs(5), stream.next_event()).await { + Ok(Some(ChainEvent::Respond(ev))) => { + assert_eq!(ev.source_chain(), Chain::Canton); + saw_respond = true; + break; + } + Ok(Some(_)) => continue, + Ok(None) => anyhow::bail!("stream closed"), + Err(_) => break, + } + } + assert!(saw_respond, "expected Respond event from Canton stream"); + Ok(()) +} diff --git a/integration-tests/tests/cases/ethereum_stream.rs b/integration-tests/tests/cases/ethereum_stream.rs index ed9abf036..c974f4f60 100644 --- a/integration-tests/tests/cases/ethereum_stream.rs +++ b/integration-tests/tests/cases/ethereum_stream.rs @@ -14,6 +14,7 @@ use k256::elliptic_curve::sec1::ToEncodedPoint as _; use mpc_node::backlog::Backlog; use mpc_node::indexer_eth::{EthConfig, EthereumStream}; use mpc_node::protocol::{Chain, SignKind}; +use mpc_node::sign_bidirectional::ChainContext; use mpc_node::stream::ops::SignatureRespondedEvent; use mpc_node::stream::{ChainEvent, ChainStream}; 
use mpc_primitives::{SignId, LATEST_MPC_KEY_VERSION}; @@ -256,6 +257,7 @@ async fn test_ethereum_stream_execution_confirmation() -> Result<()> { from_address: AlloyAddress::from_slice(ctx.wallet.as_bytes()), nonce: 0, status: mpc_node::sign_bidirectional::SignStatus::PendingExecution, + chain_ctx: ChainContext::None, }; let sign_id = SignId::new([7u8; 32]); backlog.watch_execution(Chain::Ethereum, sign_id, tx).await; diff --git a/integration-tests/tests/cases/mod.rs b/integration-tests/tests/cases/mod.rs index 441b5b77f..de9029710 100644 --- a/integration-tests/tests/cases/mod.rs +++ b/integration-tests/tests/cases/mod.rs @@ -15,6 +15,8 @@ use mpc_primitives::LATEST_MPC_KEY_VERSION; use std::time::{Duration, Instant}; use test_log::test; +pub mod canton; +pub mod canton_stream; pub mod chains; pub mod compat; pub mod ethereum;