From 12c2fa263c6c19241b96f9ed7f8d90f9ca155d36 Mon Sep 17 00:00:00 2001 From: Jack Chan Date: Mon, 5 Jan 2026 22:11:04 +0800 Subject: [PATCH 01/11] feat: add priority queue for protocol messages to prevent signature loss under high load Under high transaction load, snapshot signatures were being lost because: - All messages shared a single FIFO queue - ReqSn/AckSn protocol messages got buried behind ReqTx transactions - When AckSn arrived before local ReqSn was processed, it got re-enqueued to the back of the queue, causing signature collection to fail Solution: Dual-queue system that processes protocol messages before transactions - HighPriority: ReqSn, AckSn, ChainInput, ClientInput, ConnectivityEvent - LowPriority: ReqTx, ReqDec This ensures protocol state machine messages are never starved by transaction load. --- hydra-node/src/Hydra/HeadLogic/Input.hs | 33 +++++++++++++- hydra-node/src/Hydra/Node.hs | 17 ++++--- hydra-node/src/Hydra/Node/InputQueue.hs | 48 +++++++++++++++----- hydra-node/test/Hydra/BehaviorSpec.hs | 10 ++-- hydra-node/test/Hydra/Model/MockChain.hs | 6 ++- hydra-node/test/Hydra/Node/InputQueueSpec.hs | 3 +- hydra-node/test/Hydra/NodeSpec.hs | 15 ++++-- 7 files changed, 100 insertions(+), 32 deletions(-) diff --git a/hydra-node/src/Hydra/HeadLogic/Input.hs b/hydra-node/src/Hydra/HeadLogic/Input.hs index 14291998e57..f212af24c16 100644 --- a/hydra-node/src/Hydra/HeadLogic/Input.hs +++ b/hydra-node/src/Hydra/HeadLogic/Input.hs @@ -1,17 +1,46 @@ {-# LANGUAGE UndecidableInstances #-} -module Hydra.HeadLogic.Input where +module Hydra.HeadLogic.Input ( + Input (..), + TTL, + MessagePriority (..), + inputPriority, +) where import Hydra.Prelude import Hydra.API.ClientInput (ClientInput) import Hydra.Chain (ChainEvent) import Hydra.Chain.ChainState (IsChainState) -import Hydra.Network.Message (Message, NetworkEvent) +import Hydra.Network.Message (Message (..), NetworkEvent (..)) import Hydra.Tx.IsTx (ArbitraryIsTx) type TTL = Natural +-- | Priority level for input messages. Protocol messages (ReqSn, AckSn) get +-- high priority to prevent them from being delayed by transaction messages +-- under high load. +data MessagePriority = HighPriority | LowPriority + deriving stock (Eq, Show, Generic) + +-- | Classify an input by its priority. Protocol messages that are critical +-- for snapshot progress get high priority, while transaction submissions +-- get low priority. +inputPriority :: Input tx -> MessagePriority +inputPriority = \case + -- Protocol messages: high priority to ensure snapshot progress + NetworkInput{networkEvent = ReceivedMessage{msg = ReqSn{}}} -> HighPriority + NetworkInput{networkEvent = ReceivedMessage{msg = AckSn{}}} -> HighPriority + -- Connectivity events: high priority for protocol health + NetworkInput{networkEvent = ConnectivityEvent{}} -> HighPriority + -- Transaction requests: low priority (can be delayed under load) + NetworkInput{networkEvent = ReceivedMessage{msg = ReqTx{}}} -> LowPriority + NetworkInput{networkEvent = ReceivedMessage{msg = ReqDec{}}} -> LowPriority + -- Client inputs: high priority (user-initiated actions) + ClientInput{} -> HighPriority + -- Chain events: high priority (must be processed promptly) + ChainInput{} -> HighPriority + -- | Inputs that are processed by the head logic (the "core"). Corresponding to -- each of the "shell" layers, we distinguish between inputs from the client, -- the network and the chain. 
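For illustration, the ordering guarantee this classification is meant to
provide can be written down as a small IOSim property in the style of
InputQueueSpec (a sketch only, not part of this patch; it assumes the
two-queue API added to Hydra.Node.InputQueue below):

    import Control.Monad.IOSim (runSimOrThrow)
    import Hydra.HeadLogic.Input (MessagePriority (..))
    import Hydra.Node.InputQueue (Queued (..), createInputQueue, dequeue, enqueue)

    -- A low-priority item (e.g. a ReqTx) enqueued before a high-priority
    -- item (e.g. an AckSn) is still dequeued after it.
    prop_high_priority_dequeued_first :: Bool
    prop_high_priority_dequeued_first = runSimOrThrow $ do
      q <- createInputQueue
      enqueue q LowPriority ("reqTx" :: String)
      enqueue q HighPriority "ackSn"
      first <- queuedItem <$> dequeue q
      pure (first == "ackSn")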
diff --git a/hydra-node/src/Hydra/Node.hs b/hydra-node/src/Hydra/Node.hs index cc4f1ee9a2a..6bc17b7de43 100644 --- a/hydra-node/src/Hydra/Node.hs +++ b/hydra-node/src/Hydra/Node.hs @@ -43,6 +43,7 @@ import Hydra.HeadLogic ( aggregateState, ) import Hydra.HeadLogic qualified as HeadLogic +import Hydra.HeadLogic.Input (MessagePriority (..), inputPriority) import Hydra.HeadLogic.Outcome (StateChanged (..)) import Hydra.HeadLogic.State (getHeadParameters) import Hydra.HeadLogic.StateEvent (StateEvent (..)) @@ -226,12 +227,12 @@ hydrate tracer env ledger initialChainState EventStore{eventSource, eventSink} e ) wireChainInput :: DraftHydraNode tx m -> (ChainEvent tx -> m ()) -wireChainInput node = enqueue . ChainInput +wireChainInput node = enqueue HighPriority . ChainInput where DraftHydraNode{inputQueue = InputQueue{enqueue}} = node wireClientInput :: DraftHydraNode tx m -> (ClientInput tx -> m ()) -wireClientInput node = enqueue . ClientInput +wireClientInput node = enqueue HighPriority . ClientInput where DraftHydraNode{inputQueue = InputQueue{enqueue}} = node @@ -239,9 +240,11 @@ wireNetworkInput :: DraftHydraNode tx m -> NetworkCallback (Authenticated (Messa wireNetworkInput node = NetworkCallback { deliver = \Authenticated{party = sender, payload = msg} -> - enqueue $ mkNetworkInput sender msg + let input = mkNetworkInput sender msg + in enqueue (inputPriority input) input , onConnectivity = - enqueue . NetworkInput 1 . ConnectivityEvent + let input = NetworkInput 1 . ConnectivityEvent + in enqueue HighPriority . input } where DraftHydraNode{inputQueue = InputQueue{enqueue}} = node @@ -321,7 +324,9 @@ stepHydraNode node = do maybeReenqueue q@Queued{queuedId, queuedItem} = case queuedItem of NetworkInput ttl msg - | ttl > 0 -> reenqueue waitDelay q{queuedItem = NetworkInput (ttl - 1) msg} + | ttl > 0 -> + let newItem = NetworkInput (ttl - 1) msg + in reenqueue (inputPriority newItem) waitDelay q{queuedItem = newItem} _ -> traceWith tracer $ DroppedFromQueue{inputId = queuedId, input = queuedItem} Environment{party} = env @@ -391,7 +396,7 @@ processEffects node tracer inputId effects = do OnChainEffect{postChainTx} -> postTx postChainTx `catch` \(postTxError :: PostTxError tx) -> - enqueue . ChainInput $ PostTxError{postChainTx, postTxError, failingTx = Nothing} + enqueue HighPriority . ChainInput $ PostTxError{postChainTx, postTxError, failingTx = Nothing} traceWith tracer $ EndEffect party inputId effectId HydraNode diff --git a/hydra-node/src/Hydra/Node/InputQueue.hs b/hydra-node/src/Hydra/Node/InputQueue.hs index 20543e59054..52bcb27fac0 100644 --- a/hydra-node/src/Hydra/Node/InputQueue.hs +++ b/hydra-node/src/Hydra/Node/InputQueue.hs @@ -1,4 +1,6 @@ -- | The general input queue from which the Hydra head is fed with inputs. +-- This implementation uses a priority queue system to ensure protocol messages +-- (ReqSn, AckSn) are processed before transaction messages under high load. module Hydra.Node.InputQueue where import Hydra.Prelude @@ -7,22 +9,31 @@ import Control.Concurrent.Class.MonadSTM ( isEmptyTQueue, modifyTVar', readTQueue, + tryReadTQueue, writeTQueue, ) +import Hydra.HeadLogic.Input (MessagePriority (..)) --- | The single, required queue in the system from which a hydra head is "fed". +-- | The input queue system with priority support. High priority messages +-- (protocol messages like ReqSn, AckSn) are processed before low priority +-- messages (transaction requests) to ensure snapshot progress under high load. 
+-- -- NOTE(SN): this probably should be bounded and include proper logging -- NOTE(SN): handle pattern, but likely not required as there is no need for an -- alternative implementation data InputQueue m e = InputQueue - { enqueue :: e -> m () - , reenqueue :: DiffTime -> Queued e -> m () + { enqueue :: MessagePriority -> e -> m () + , reenqueue :: MessagePriority -> DiffTime -> Queued e -> m () , dequeue :: m (Queued e) , isEmpty :: m Bool } data Queued a = Queued {queuedId :: Word64, queuedItem :: a} +-- | Create an input queue with priority support. The queue maintains two +-- internal queues: one for high priority messages (protocol) and one for +-- low priority messages (transactions). Dequeue always tries high priority +-- first before falling back to low priority. createInputQueue :: ( MonadDelay m , MonadAsync m @@ -31,27 +42,40 @@ createInputQueue :: m (InputQueue m e) createInputQueue = do numThreads <- newLabelledTVarIO "num-threads" (0 :: Integer) - nextId <- newLabelledTVarIO "nex-id" 0 - q <- newLabelledTQueueIO "input-queue" + nextId <- newLabelledTVarIO "next-id" 0 + -- Two separate queues for priority handling + highPriorityQueue <- newLabelledTQueueIO "input-queue-high" + lowPriorityQueue <- newLabelledTQueueIO "input-queue-low" pure InputQueue - { enqueue = \queuedItem -> + { enqueue = \priority queuedItem -> atomically $ do queuedId <- readTVar nextId - writeTQueue q Queued{queuedId, queuedItem} + let queued = Queued{queuedId, queuedItem} + case priority of + HighPriority -> writeTQueue highPriorityQueue queued + LowPriority -> writeTQueue lowPriorityQueue queued modifyTVar' nextId succ - , reenqueue = \delay e -> do + , reenqueue = \priority delay e -> do atomically $ modifyTVar' numThreads succ void . asyncLabelled "input-queue-reenqueue" $ do threadDelay delay atomically $ do modifyTVar' numThreads pred - writeTQueue q e + case priority of + HighPriority -> writeTQueue highPriorityQueue e + LowPriority -> writeTQueue lowPriorityQueue e , dequeue = - atomically $ readTQueue q + -- Always try high priority first, then fall back to low priority + atomically $ do + mHigh <- tryReadTQueue highPriorityQueue + case mHigh of + Just item -> pure item + Nothing -> readTQueue lowPriorityQueue , isEmpty = do atomically $ do n <- readTVar numThreads - isEmpty' <- isEmptyTQueue q - pure (isEmpty' && n == 0) + isHighEmpty <- isEmptyTQueue highPriorityQueue + isLowEmpty <- isEmptyTQueue lowPriorityQueue + pure (isHighEmpty && isLowEmpty && n == 0) } diff --git a/hydra-node/test/Hydra/BehaviorSpec.hs b/hydra-node/test/Hydra/BehaviorSpec.hs index d4bc8c7de7f..1f0e874b060 100644 --- a/hydra-node/test/Hydra/BehaviorSpec.hs +++ b/hydra-node/test/Hydra/BehaviorSpec.hs @@ -34,6 +34,7 @@ import Hydra.Chain.Direct.Handlers (LocalChainState, getLatest, newLocalChainSta import Hydra.Events (EventSink (..)) import Hydra.Events.Rotation (EventStore (..)) import Hydra.HeadLogic (CoordinatedHeadState (..), Effect (..), HeadState (..), InitialState (..), Input (..), OpenState (..)) +import Hydra.HeadLogic.Input (MessagePriority (..), inputPriority) import Hydra.HeadLogicSpec (testSnapshot) import Hydra.Ledger (Ledger, nextChainSlot) import Hydra.Ledger.Simple (SimpleChainState (..), SimpleTx (..), aValidTx, simpleLedger, utxoRef, utxoRefs) @@ -1174,7 +1175,7 @@ simulatedChainAndNetwork initialChainState = do recordAndYieldEvent nodes history ev handleChainEvent :: HydraNode tx m -> ChainEvent tx -> m () -handleChainEvent HydraNode{inputQueue} = enqueue inputQueue . 
ChainInput +handleChainEvent HydraNode{inputQueue} = enqueue inputQueue HighPriority . ChainInput createMockNetwork :: MonadSTM m => DraftHydraNode tx m -> TVar m [HydraNode tx m] -> Network m (Message tx) createMockNetwork node nodes = @@ -1185,7 +1186,8 @@ createMockNetwork node nodes = mapM_ (`handleMessage` msg) allNodes handleMessage HydraNode{inputQueue} msg = - enqueue inputQueue $ mkNetworkInput sender msg + let input = mkNetworkInput sender msg + in enqueue inputQueue (inputPriority input) input sender = getParty node @@ -1292,10 +1294,10 @@ createTestHydraClient :: TestHydraClient tx m createTestHydraClient outputs messages outputHistory HydraNode{inputQueue, nodeStateHandler} = TestHydraClient - { send = enqueue inputQueue . ClientInput + { send = enqueue inputQueue HighPriority . ClientInput , waitForNext = atomically (readTQueue outputs) , waitForNextMessage = atomically (readTQueue messages) - , injectChainEvent = enqueue inputQueue . ChainInput + , injectChainEvent = enqueue inputQueue HighPriority . ChainInput , serverOutputs = reverse <$> readTVarIO outputHistory , queryState = atomically (queryNodeState nodeStateHandler) } diff --git a/hydra-node/test/Hydra/Model/MockChain.hs b/hydra-node/test/Hydra/Model/MockChain.hs index 9c25bd31a5b..7edd73f0474 100644 --- a/hydra-node/test/Hydra/Model/MockChain.hs +++ b/hydra-node/test/Hydra/Model/MockChain.hs @@ -58,6 +58,7 @@ import Hydra.HeadLogic ( Input (..), OpenState (..), ) +import Hydra.HeadLogic.Input (MessagePriority (..), inputPriority) import Hydra.Ledger (Ledger (..), ValidationError (..), collectTransactions) import Hydra.Ledger.Cardano (adjustUTxO, fromChainSlot) import Hydra.Ledger.Cardano.Evaluate (eraHistoryWithoutHorizon, evaluateTx, renderEvaluationReport) @@ -189,7 +190,7 @@ mockChainAndNetwork tr seedKeys commits = do , chainHandler = chainSyncHandler tr - (enqueue . ChainInput) + (enqueue HighPriority . 
ChainInput) getTimeHandle ctx localChainState @@ -375,7 +376,8 @@ createMockNetwork draftNode nodes = mapM_ (`handleMessage` msg) allNodes handleMessage HydraNode{inputQueue} msg = do - enqueue inputQueue $ mkNetworkInput sender msg + let input = mkNetworkInput sender msg + enqueue inputQueue (inputPriority input) input sender = getParty draftNode diff --git a/hydra-node/test/Hydra/Node/InputQueueSpec.hs b/hydra-node/test/Hydra/Node/InputQueueSpec.hs index 74ec0b096dd..47c00ef0eef 100644 --- a/hydra-node/test/Hydra/Node/InputQueueSpec.hs +++ b/hydra-node/test/Hydra/Node/InputQueueSpec.hs @@ -3,6 +3,7 @@ module Hydra.Node.InputQueueSpec where import Hydra.Prelude import Control.Monad.IOSim (IOSim, runSimOrThrow) +import Hydra.HeadLogic.Input (MessagePriority (..)) import Hydra.Node.InputQueue (Queued (queuedId), createInputQueue, dequeue, enqueue) import Test.Hspec (Spec) import Test.Hspec.QuickCheck (prop) @@ -22,7 +23,7 @@ prop_identify_enqueued_items (NonEmpty inputs) = test = do q <- createInputQueue forM inputs $ \i -> do - enqueue q i + enqueue q HighPriority i queuedId <$> dequeue q ids = runSimOrThrow test in isContinuous ids diff --git a/hydra-node/test/Hydra/NodeSpec.hs b/hydra-node/test/Hydra/NodeSpec.hs index 38c1abf482f..7162526b662 100644 --- a/hydra-node/test/Hydra/NodeSpec.hs +++ b/hydra-node/test/Hydra/NodeSpec.hs @@ -16,6 +16,7 @@ import Hydra.Chain.ChainState (ChainSlot (ChainSlot), IsChainState) import Hydra.Events (EventSink (..), EventSource (..), getEventId) import Hydra.Events.Rotation (EventStore (..), LogId) import Hydra.HeadLogic (Input (..), TTL) +import Hydra.HeadLogic.Input (inputPriority) import Hydra.HeadLogic.Outcome (StateChanged (HeadInitialized), genStateChanged) import Hydra.HeadLogic.StateEvent (StateEvent (..), genStateEvent) import Hydra.HeadLogicSpec (inInitialState, receiveMessage, receiveMessageFrom, testSnapshot) @@ -333,11 +334,15 @@ spec = parallel $ do entries <- fmap Logging.message <$> readTVarIO logs entries `shouldSatisfy` any isContestationPeriodMismatch --- | Add given list of inputs to the 'InputQueue'. This is returning the node to --- allow for chaining with 'runToCompletion'. -primeWith :: Monad m => [Input tx] -> HydraNode tx m -> m (HydraNode tx m) -primeWith inputs node@HydraNode{inputQueue = InputQueue{enqueue}} = do - forM_ inputs enqueue +-- | Add given list of inputs to the 'InputQueue'. A preceding 'Tick' is enqueued +-- to advance the chain slot and ensure the 'NodeState' is in sync. This is +-- returning the node to allow for chaining with 'runToCompletion'. +primeWith :: (MonadSTM m, MonadTime m) => [Input tx] -> HydraNode tx m -> m (HydraNode tx m) +primeWith inputs node@HydraNode{inputQueue = InputQueue{enqueue}, nodeStateHandler = NodeStateHandler{queryNodeState}} = do + now <- getCurrentTime + chainSlot <- currentSlot <$> atomically queryNodeState + let tick = ChainInput $ Tick now (chainSlot + 1) + forM_ (tick : inputs) $ \input -> enqueue (inputPriority input) input pure node -- | Convert a 'DraftHydraNode' to a 'HydraNode' by providing mock implementations. From 387a3a279956c9cf01f41b62493147a2bbe468c6 Mon Sep 17 00:00:00 2001 From: Jack Chan Date: Tue, 6 Jan 2026 08:07:23 +0800 Subject: [PATCH 02/11] ci: add disk cleanup step to prevent space exhaustion GitHub Actions runners have limited disk space (~14GB available). When building uncached Nix derivations (like our modified hydra-node), the build can exhaust disk space during compilation. 
This adds a cleanup step that removes unused tools before the build:
- .NET SDK (~1.8GB)
- Android SDK (~9GB)
- GHC (~5GB)
- CodeQL (~2.5GB)
- Unused Docker images

This frees up ~20GB of disk space, ensuring builds complete successfully.
---
 .github/actions/nix-cachix-setup/action.yml | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/.github/actions/nix-cachix-setup/action.yml b/.github/actions/nix-cachix-setup/action.yml
index fcf19333bb5..6c24c24a284 100644
--- a/.github/actions/nix-cachix-setup/action.yml
+++ b/.github/actions/nix-cachix-setup/action.yml
@@ -9,6 +9,21 @@ runs:
   using: composite

   steps:
+    - name: 🧹 Free disk space
+      if: runner.os == 'Linux'
+      shell: bash
+      run: |
+        echo "Disk space before cleanup:"
+        df -h /
+        # Remove unnecessary tools to free up disk space
+        sudo rm -rf /usr/share/dotnet
+        sudo rm -rf /usr/local/lib/android
+        sudo rm -rf /opt/ghc
+        sudo rm -rf /opt/hostedtoolcache/CodeQL
+        sudo docker image prune --all --force || true
+        echo "Disk space after cleanup:"
+        df -h /
+
     - name: ❄ Prepare nix
       uses: cachix/install-nix-action@v30
       with:

From 9787bce661ee5a6b540fa174b0fb33696d1d116e Mon Sep 17 00:00:00 2001
From: Jack Chan
Date: Sat, 3 Jan 2026 19:21:30 +0800
Subject: [PATCH 03/11] ci: enable Docker builds for pull requests

- Add pull_request trigger for PRs targeting master branch
- Tag PR builds as pr-<number> for easy identification
- Use PR head SHA as version for traceability
---
 .github/workflows/docker.yaml | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml
index e0021638606..b716d2bcba0 100644
--- a/.github/workflows/docker.yaml
+++ b/.github/workflows/docker.yaml
@@ -13,6 +13,8 @@ on:
   push:
     branches: [ "master" ]
     tags: [ "*.*.*" ]
+  pull_request:
+    branches: [ "master" ]
   workflow_dispatch:
     inputs:
       ref_name:
@@ -73,6 +75,12 @@ jobs:
           # And the version as the git commit.
           VERSION=${{github.sha}}

+          # For PRs, tag as pr-<number>
+          if [[ "${{github.event_name}}" == "pull_request" ]]; then
+            IMAGE_LABEL=pr-${{github.event.pull_request.number}}
+            VERSION=${{github.event.pull_request.head.sha}}
+          fi
+
           # Determine whether we are building a tag and if yes, set the label
           # name to be the tag name, and the version to be the tag.
BUILDING_TAG=${{github.ref_type == 'tag'}} From 00c513baacff0181465c782261ea7e7930e8ab39 Mon Sep 17 00:00:00 2001 From: Jack Chan Date: Tue, 6 Jan 2026 17:59:37 +0800 Subject: [PATCH 04/11] ci: trigger CI and Docker builds for v1.2.0-base branch --- .github/workflows/ci-nix.yaml | 4 ++++ .github/workflows/docker.yaml | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-nix.yaml b/.github/workflows/ci-nix.yaml index 8477afbd02d..d3843fa4815 100644 --- a/.github/workflows/ci-nix.yaml +++ b/.github/workflows/ci-nix.yaml @@ -11,7 +11,11 @@ on: branches: - master - release + - v1.2.0-base pull_request: + branches: + - master + - v1.2.0-base schedule: # Everyday at 4:00 AM - cron: "0 4 * * *" diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml index b716d2bcba0..f9262c0609f 100644 --- a/.github/workflows/docker.yaml +++ b/.github/workflows/docker.yaml @@ -11,10 +11,10 @@ concurrency: on: push: - branches: [ "master" ] + branches: [ "master", "v1.2.0-base" ] tags: [ "*.*.*" ] pull_request: - branches: [ "master" ] + branches: [ "master", "v1.2.0-base" ] workflow_dispatch: inputs: ref_name: From 15558142d7eddfeb713490396c5d39317de07daa Mon Sep 17 00:00:00 2001 From: Jack Chan Date: Tue, 6 Jan 2026 23:29:57 +0800 Subject: [PATCH 05/11] fix: add NodeStateHandler import to NodeSpec.hs The primeWith function uses NodeStateHandler to query the current chain slot for the Tick event, but the import was missing. --- hydra-node/test/Hydra/NodeSpec.hs | 1 + 1 file changed, 1 insertion(+) diff --git a/hydra-node/test/Hydra/NodeSpec.hs b/hydra-node/test/Hydra/NodeSpec.hs index 7162526b662..38d54cfec9f 100644 --- a/hydra-node/test/Hydra/NodeSpec.hs +++ b/hydra-node/test/Hydra/NodeSpec.hs @@ -29,6 +29,7 @@ import Hydra.Node ( DraftHydraNode, HydraNode (..), HydraNodeLog (..), + NodeStateHandler (..), checkHeadState, connect, hydrate, From d51084b0140f687b88e70f35480f95721a3b20a4 Mon Sep 17 00:00:00 2001 From: Jack Chan Date: Tue, 6 Jan 2026 23:40:00 +0800 Subject: [PATCH 06/11] fix: add MonadTime constraint to testHydraNode for primeWith compatibility --- hydra-node/test/Hydra/NodeSpec.hs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hydra-node/test/Hydra/NodeSpec.hs b/hydra-node/test/Hydra/NodeSpec.hs index 38d54cfec9f..e6b76d4b17c 100644 --- a/hydra-node/test/Hydra/NodeSpec.hs +++ b/hydra-node/test/Hydra/NodeSpec.hs @@ -451,7 +451,7 @@ runToCompletion node@HydraNode{inputQueue = InputQueue{isEmpty}} = go -- | Creates a full 'HydraNode' with given parameters and primed 'Input's. Note -- that this node is 'notConnect'ed to any components. testHydraNode :: - (MonadDelay m, MonadAsync m, MonadLabelledSTM m, MonadThrow m, MonadUnliftIO m) => + (MonadDelay m, MonadAsync m, MonadLabelledSTM m, MonadThrow m, MonadUnliftIO m, MonadTime m) => Tracer m (HydraNodeLog SimpleTx) -> SigningKey HydraKey -> [Party] -> From 4107db2c9f1e24d434b7bce2ba04844ff8a8fec5 Mon Sep 17 00:00:00 2001 From: Jack Chan Date: Wed, 7 Jan 2026 08:51:21 +0800 Subject: [PATCH 07/11] fix: update RotationSpec test expectations for primeWith Tick injection The primeWith function now injects a Tick event before processing inputs, which increases the total event count in tests. 
Updated test expectations: - 'rotates while running': 5+1 tick = 6 inputs, RotateAfter 5 - 'consistent state after restarting': 5+1+2 ticks = 8 inputs - 'rotated and non-rotated node': 6+1 tick = 7 inputs - 'restarted and non-restarted node': restarted has 8 events (2 ticks), non-restarted has 7 events (1 tick), so eventId differs by 1 --- hydra-node/test/Hydra/Events/RotationSpec.hs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/hydra-node/test/Hydra/Events/RotationSpec.hs b/hydra-node/test/Hydra/Events/RotationSpec.hs index 3b30affac05..2c6d0c451dc 100644 --- a/hydra-node/test/Hydra/Events/RotationSpec.hs +++ b/hydra-node/test/Hydra/Events/RotationSpec.hs @@ -42,9 +42,9 @@ spec = parallel $ do it "rotates while running" $ \testHydrate -> do failAfter 1 $ do eventStore <- createMockEventStore - -- NOTE: because there will be 5 inputs processed in total, + -- NOTE: because there will be 6 inputs processed in total (5 inputs + 1 tick), -- this is hardcoded to ensure we get a checkpoint + a single event at the end - let rotationConfig = RotateAfter (Positive 3) + let rotationConfig = RotateAfter (Positive 5) let s0 = initNodeState SimpleChainState{slot = ChainSlot 0} rotatingEventStore <- newRotatedEventStore rotationConfig s0 mkAggregator mkCheckpoint eventStore testHydrate rotatingEventStore [] @@ -56,7 +56,7 @@ spec = parallel $ do it "consistent state after restarting with rotation" $ \testHydrate -> do failAfter 1 $ do eventStore <- createMockEventStore - -- NOTE: because there will be 6 inputs processed in total, + -- NOTE: because there will be 8 inputs processed in total (5 inputs + 1 input + 2 ticks), -- this is hardcoded to ensure we get a single checkpoint event at the end let rotationConfig = RotateAfter (Positive 1) let s0 = initNodeState SimpleChainState{slot = ChainSlot 0} @@ -84,7 +84,7 @@ spec = parallel $ do let inputs = inputsToOpenHead ++ [closeInput] failAfter 1 $ do eventStore <- createMockEventStore - -- NOTE: because there will be 6 inputs processed in total, + -- NOTE: because there will be 7 inputs processed in total (6 inputs + 1 tick), -- this is hardcoded to ensure we get a single checkpoint event at the end let rotationConfig = RotateAfter (Positive 1) -- run rotated event store with prepared inputs @@ -115,7 +115,8 @@ spec = parallel $ do let inputs2 = drop 3 inputs failAfter 1 $ do let s0 = initNodeState SimpleChainState{slot = ChainSlot 0} - -- NOTE: because there will be 6 inputs processed in total, + -- NOTE: because there will be 8 inputs processed in total for restarted node + -- (3 inputs + 3 inputs + 2 ticks) vs 7 for non-restarted (6 inputs + 1 tick), -- this is hardcoded to ensure we get a single checkpoint event at the end let rotationConfig = RotateAfter (Positive 1) -- run restarted node with prepared inputs @@ -142,7 +143,8 @@ spec = parallel $ do [StateEvent{eventId = eventId', stateChanged = checkpoint'}] <- getEvents (eventSource rotatingEventStore') checkpoint `shouldBe` checkpoint' -- stored events should yield consistent event ids - eventId `shouldBe` eventId' + -- note the restarted node has more Tick events (one extra per primeWith call) + eventId `shouldBe` eventId' + 1 describe "Rotation algorithm" $ do prop "rotates on startup" $ From 0ae2ff90f5cd43a81dc51fea291b2f8c4dc8a6a9 Mon Sep 17 00:00:00 2001 From: Jack Chan Date: Wed, 7 Jan 2026 10:49:49 +0800 Subject: [PATCH 08/11] fix: correct RotateAfter config for 6-input test with Tick injection With primeWith adding a Tick, we have 6 events total. 
To get checkpoint + 1 leftover (2 events), rotation must trigger after 5th event. RotateAfter(4) means rotate when count > 4, so 5 > 4 = TRUE triggers rotation. --- hydra-node/test/Hydra/Events/RotationSpec.hs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hydra-node/test/Hydra/Events/RotationSpec.hs b/hydra-node/test/Hydra/Events/RotationSpec.hs index 2c6d0c451dc..fb25e87d0b7 100644 --- a/hydra-node/test/Hydra/Events/RotationSpec.hs +++ b/hydra-node/test/Hydra/Events/RotationSpec.hs @@ -43,8 +43,10 @@ spec = parallel $ do failAfter 1 $ do eventStore <- createMockEventStore -- NOTE: because there will be 6 inputs processed in total (5 inputs + 1 tick), - -- this is hardcoded to ensure we get a checkpoint + a single event at the end - let rotationConfig = RotateAfter (Positive 5) + -- this is hardcoded to ensure we get a checkpoint + a single event at the end. + -- With RotateAfter 4: after 5 events (5 > 4 = TRUE) rotation happens, + -- then 6th event is stored separately, giving us 2 events total. + let rotationConfig = RotateAfter (Positive 4) let s0 = initNodeState SimpleChainState{slot = ChainSlot 0} rotatingEventStore <- newRotatedEventStore rotationConfig s0 mkAggregator mkCheckpoint eventStore testHydrate rotatingEventStore [] From c4055c0c54dc0d125bfc0e23c2491bb3f1ead63a Mon Sep 17 00:00:00 2001 From: Jack Chan Date: Thu, 8 Jan 2026 14:40:22 +0800 Subject: [PATCH 09/11] feat: implement hybrid snapshot throttling for Hydra Head protocol Add configurable batch-size and time-interval based snapshot throttling: - Add --snapshot-batch-size CLI option (default: 10) to trigger snapshots when pending transaction count reaches threshold - Add --snapshot-interval CLI option (default: 0.1s) to trigger snapshots after time interval when transactions are pending - Track lastSnapshotTime in CoordinatedHeadState for time-based throttling - Implement hybrid logic: snapshots trigger on EITHER batch size OR time interval - Add requestedAt timestamp to SnapshotRequestDecided for observability Backward compatible: set snapshotBatchSize=1 for legacy per-tx behavior. 
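The hybrid trigger boils down to a disjunction over the pending transactions.
As a minimal sketch (names are illustrative; the actual logic in HeadLogic.hs
additionally checks snapshot leadership and whether a snapshot is already in
flight):

    import Data.Time (DiffTime, UTCTime, diffUTCTime)
    import Numeric.Natural (Natural)

    -- Request a snapshot when there is anything pending AND either the
    -- batch is full or the configured interval has elapsed since the
    -- last snapshot (no previous snapshot counts as elapsed).
    shouldRequestSnapshot ::
      Natural ->       -- snapshotBatchSize
      DiffTime ->      -- snapshotInterval
      UTCTime ->       -- current time
      Maybe UTCTime -> -- lastSnapshotTime
      Int ->           -- pending local txs
      Bool
    shouldRequestSnapshot batchSize interval now lastSnapshotTime pending =
      pending > 0 && (batchFull || intervalElapsed)
     where
      batchFull = fromIntegral pending >= batchSize
      intervalElapsed = case lastSnapshotTime of
        Nothing -> True
        Just t -> realToFrac (diffUTCTime now t) >= interval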
Files changed: - Environment.hs: Add snapshotBatchSize/snapshotInterval fields - Options.hs: Add CLI parsers and defaults - HeadLogic.hs: Implement batch-size and time-based throttling logic - State.hs: Add lastSnapshotTime tracking - Outcome.hs: Add requestedAt to SnapshotRequestDecided - api.yaml: Update JSON schema with new fields - Test fixtures and golden files updated accordingly --- hydra-node/golden/Greetings/Greetings.json | 4 +- ...nablySized (HeadState (Tx ConwayEra)).json | 2 + ...nablySized (NodeState (Tx ConwayEra)).json | 1 + hydra-node/golden/RunOptions.json | 408 +++++++++--------- .../golden/StateChanged/Checkpoint.json | 1 + .../StateChanged/SnapshotRequestDecided.json | 1 + hydra-node/json-schemas/api.yaml | 17 + hydra-node/src/Hydra/HeadLogic.hs | 100 +++-- hydra-node/src/Hydra/HeadLogic/Outcome.hs | 9 +- hydra-node/src/Hydra/HeadLogic/State.hs | 6 +- hydra-node/src/Hydra/Node.hs | 4 + hydra-node/src/Hydra/Node/Environment.hs | 59 ++- hydra-node/src/Hydra/Options.hs | 69 +++ hydra-node/test/Hydra/BehaviorSpec.hs | 2 + .../test/Hydra/HeadLogicSnapshotSpec.hs | 6 + hydra-node/test/Hydra/HeadLogicSpec.hs | 9 + hydra-node/test/Hydra/NodeSpec.hs | 4 + hydra-node/testlib/Test/Hydra/Node/Fixture.hs | 2 + 18 files changed, 461 insertions(+), 243 deletions(-) diff --git a/hydra-node/golden/Greetings/Greetings.json b/hydra-node/golden/Greetings/Greetings.json index b02d192d00c..e48ec19b5d5 100644 --- a/hydra-node/golden/Greetings/Greetings.json +++ b/hydra-node/golden/Greetings/Greetings.json @@ -10,7 +10,9 @@ "party": { "vkey": "348cdf360a866dedf92cb18cfb736c2da212ea44d2f1270fe60dad88801be97c" }, - "signingKey": "23db2c1b05cc1c905486253e1a39b0df087217f33180f7d5cf4aa1ec8c0b6634" + "signingKey": "23db2c1b05cc1c905486253e1a39b0df087217f33180f7d5cf4aa1ec8c0b6634", + "snapshotBatchSize": 10, + "snapshotInterval": 0.1 }, "headStatus": "Closed", "hydraHeadId": "01000001000001000000000001000101", diff --git a/hydra-node/golden/ReasonablySized (HeadState (Tx ConwayEra)).json b/hydra-node/golden/ReasonablySized (HeadState (Tx ConwayEra)).json index cf6ff49c894..c3ad7b3659c 100644 --- a/hydra-node/golden/ReasonablySized (HeadState (Tx ConwayEra)).json +++ b/hydra-node/golden/ReasonablySized (HeadState (Tx ConwayEra)).json @@ -2310,6 +2310,7 @@ "txId": "d6027fac0eed420dfca6aa6f6aa842eb7446f64e26f77cc96bff77fe0af81373", "type": "Tx ConwayEra" }, + "lastSnapshotTime": null, "localTxs": [ { "cborHex": 
"84af00d90102828258204d75d241f2d1fd0fdcc056940d3c6c7c619e3c5d327005e11a087a872e51ce3705825820913dc98e17442763722145725168c151fdc1ca3c8135d905af5365ac8c69e6de060dd9010281825820e20d1d5a79c3334fcb549724dba3085e55842361dd001cb681e89dfe6efddf850212d9010283825820511a3f4bfffb55c66bd101286dbb08a8ca0295cd6cf02e6869a8a6890894551e07825820e185009dd0cdbdbb1fa7fa4b1125faa8cfb06b666cff5af224924d9cb4a4598c03825820e8431283d3920919640894affaa42b0a010016e5751e41f1bb4344743825f750000184a400583931bb0df19a0000dcb9b0c13d489d1e0765ffc98c9407f3b1cb40b433086ed72966e234563d13e97dc3a146b293a08c6809073a1842877f8fd8018200a1581c245d5a7a06fe18358242e81281cd5ba9e6abe4efc54e7b659f25abaea14133010282005820266409b2df645cf251baff416e40ead2e4b74bfd9fd8076d312e8fd0664d386d03d818458200820404a400581d61cc09d13a35cb26adff0cea28ac81fadf1a2d4d2c51c76c1d9363825c01821b0a10f918fb1dc66da1581cf1d1834eb74792624b108bf125b1953ab90dc80ed9ea1abe17ceadbaa1581c296ebe807967bf0930b6716ccd4574e6442170326f5e4780706b8b121b119800e0fcb29d40028200582030e503aa73b7674ea23f8f517ecfff96c7a47ea2166260a02f277e0478e361ca03d818458200820516a300583910232fa2347ddfea3f76cde0490666601b5c0a52e7f78f8866bfbe15d33f9c01ed8bf6fcf72959b8814c292308006f95f664b8140af4881bfa01821b578ae17fa636cfd6a1581c2db8410d969b6ad6b6969703c77ebf6c44061aa51c5d6ceba46557e2a156f3ccfd4bb922004b257b8eaf6c6781e6b0f8dfe6a34d0103d81849820146450100002261a300581d707a7730288ef3f43f17fdeb91beef3a7949202e4d96d5d0b70e458f6401821b40f1b52c3061c607a1581cc714ad6dfdce910c7a33831418358abf5ee25435eb9efd96e1c61dc9a1451bbbe4f0f51b52dfa1e5caecdb1d03d8184b820248470100002220010110a300583931bd59d5b0a7f37ecb891f50036bad512993fa446ada40dbffd57facf5f32e0c1428b66b3725e6f9f42f9845cba38b5256cec61cf4c26df07d018200a1581c4a1c412d8e2b3015a7fb7d382808fb7cb721bf93a56e8bb6661cdebea141361b30d73265d1e9652603d8184a82024746010000220011021a0005eb0b04d9010281830e8200581c4d96bc8cf30ed80a26542f7998e08494ca1e7fe4b215cc1cd360252f8201581c64b6485d329c9f8f89f3a3678e8dc9d9a596b71332e5b259a135640305a5581df083c7a75ad116f3a230a860a2b1eb3e4874aec674976327f565648b8e1a0008fa34581df0daed44ce93100e22a8a432f952df8719479807b963c2e2b69d6fcf711a000b9dff581de077f4b771704eee2b390baa12d31dc67cd948cfa7805ca1cc35a10d6c1a000ea603581df10a97ad03ccba43193db6bcb456017b3b2a8e8abb4cf763cf8f56c22d1a0002c8c7581de11e9f7f9ca5bc02531f8cddf9690e24151e10e96d4e704328b8a3bb9a1a0003974d08010ed9010283581c0e1d6ce0df0b87128be46f5c4e87c8b3e6a743608376a6462cfd4c78581c7e8db2be026e79b44d52a4c7b1b8a16403a3493d7455ba942f5b5fa4581cdb2306addc7d14993f32ef524184614108ad90df1775cbf35689341d09a1581c8f461954fe2f18fee1dca233f358907e643ff839ed1f995e4bf325e3a1491aa8d136397871cf601b678362c41ff3007a0b58200d81678dfde49780d0d21fb89742cb9ba50e51ff548a279ac472860d3ba442be13a28200581c5f6ab18eece0ec7abb37935921609f48d03a1007b8dd39808cce5f47a582582005629f1bfc725910f2c3802b224c42c465796c36ab510105d8185f454af81af506820282781c68747470733a2f2f5277775a6a70754b703365452e35486c2e636f6d5820e7771884bbe85fc46f65063205e911f39b26573cfa2aac5544eac466698200648258201aad97bd9d8b9a9c6eae9518e790f730590e3cd94c3d0fdbe9473b9348ff135904820082781d68747470733a2f2f7a6c78684f686b4c322e343444393257362e636f6d5820b3bfed33f98b3609c42e0271a2a23857434b80b100402476d92214ac24c833f2825820b892fb5e0b35aca7555bb461e203aed24ed9d2417e866165283070feaaaad43a03820182783b68747470733a2f2f6a7445645544567a353578446a41494f654861726e736a32713453695177346b457137765a5638775148476d482e6d2e636f6d58208e3e33c990028f73e75a26425271ed10fab0e8060516ee365635781fd18c35ad825820cbf8b66d9714f534e965a9917b874dab830aa068d1db5561ccaf871f91681ef5008201f6825820d0bcb2bd1bd5
92b2d938a6ccc9108679bec043b8e3286c2501397d94c8008c1c058200f68203581c21760c3f4d2e032585581c5d4cde3f5988db057d75cbaa0f7556baeda682582026f3eabdc04335f72e9d6c57959e70cf99b9e11b5e22e582c67171e4e162318904820282781b68747470733a2f2f68346c61674e584a324c436168692e2e636f6d58204748c6142018e1d9081284fa098441e8f0a96e4ab5e1c5edac0deaeaedc68a9f8258203fe021d092773127fcd1c2eff209d383db7db04e0f8c3973e569f4e01b47870b058200827768747470733a2f2f6951316b454763797834682e636f6d58209d549fe7478215a4719a1b0d74ac62afe2cd49f8bd52a195438cce5f6d26df648258205e5a09e0c75e84393e299e3414b6744d179741400b650f22fbf367c94de52f1c06820182783f68747470733a2f2f6f3661506c4833617a6a317979504748393254716368744a636567513555724f4a7a79614838437a6d63376d436d39624d66712e636f6d5820c72864bc22b1d59571eac89bf0a28adedbed684ef99c355ea92c41a6b6e401a0825820868568e1ec7c279e71ac8f0a1837cf2a1cfe5e32cacfdb3c6b13698e2618024807820082782f68747470733a2f2f434a4b69666b34704b2e756b737133466a58454e474653724844502e56323145422d432e636f6d5820beaa76949118dd9b39412b2867da39f5c83d3ffea67e00620c9e364ea9f820248258209170302fdf73e68fec4b90f2fdb8aba02f463d4064c96f866fc006a7a69b2aa1028202f6825820f2adba217bc5cf57a3c114457c7ad18ccd7503d474e145ecb4b0159bfe495db5028202f614d9010286841a0008e174581de0da87a9b1e23d92d8e223627e27fb17b8638bf1ff22b32c1b6bf3ccca8301825820d3ea34f02d214adcdeb9276154519abb178c08d3093cb45a5aaba7258b7ac71005820a0382782268747470733a2f2f4f3655716d7a6a6a434876394d6d7a4276614c6e714a2e636f6d582048e4c27c436ceaad9e81c37684c4629eadfeae8c61153611cd39b15940dcfc48841a000bca01581df0f944d4fc5ec25a61b339e579723d9fe67892d93de4937e222d493b808305825820c5aa8541bbfb8b948ebec132030175ecca2ded9ba567e7b0f72cf341473bd1d10182827268747470733a2f2f535a69386e542e636f6d5820856f7be66543bd5f87a94dc6c6e3d81be7bf06d486f4fe9b6beec9b4d961741c581cfbf9555b4f5d730ff5d5edce99faf54e4ca5f8493b8320b27163a49682783d68747470733a2f2f32694b57593147474c4b444e654d71736c5377394f4334566e78706e326b5151793955496353596f3768426e6a74554a4c2e636f6d582091a4f13f8dbca2fca1cf5167fc5c808dfe1c162ffd53d13c5b1b67eddd9ea953841a00062e7d581df00891cb1cc5c450c0d44709fe9e26f39d334038f47c369152354a40a3810682782d68747470733a2f2f6463464f4a796a38352e503530415046456e6d6541554c67566652776a74486c412e636f6d58206a44e657657d2742be84994cb4b2ea1be81497456527dc9561b3f83f3271f839841a000e9b91581df123beac60fa95a3df18331e46d4dbaaa9b87e9003f6e1a77f91a252cf83018258201f5f3b45c424be3b1f1290e438304a2fcedff5e56d0f6ebbba619cf228c823a90682020282783d68747470733a2f2f395a6968514a4c6161787a346c6e712e4e715055344b4461704b747141683662677277454c4f487a4b79506934755742572e636f6d5820aa4f4c699d54a926ce7c6ff7d86ab14ad747934e560179922b5f2624ebcca4c0841a0009bc5c581de146642af8ae2e4a3b22b5598f171e6e27502bdbc8906374e6dea0c03f8302a5581df044f7bb981f9bcfdbf484ff1447d24ae98fb9dcd6beb94de23f8a5e861a0003924d581df0acc328204abffe2373c52514ad551a7b379673ef13e40126bbe249fa1a00044272581de03413a922af0655bdbcdd126dd9cd87767b855ca6d1427fc1c55377ef1a0001fd70581de07dda87f4e1d91258a549d5c289b58010f240746d5d7786698328e9551a000a8284581df1f1e1f686caa644b21e9169a3582d628a6696be02b8a7c0ab7218e9181a00064778581c30a4d377479f2d7f7b983ce36edc5133d028e49d739fdafcb55d071a82783868747470733a2f2f65396e6677492e57567951347438434368716a616a67424b584864367a2e56743666545451674d74313243502e636f6d58204cebc24852fb5eca3387b492d6d508895e41a28479513d27829ded17ca6147e9841a0008f22b581de1e7cb3e41b3c4ecbfa303ef5bcc5ca98d13b1b942f6abc13474688fcf8504825820017db575738495ff456c3c0880a24902096e6e006ae3712d31cef762e43d419501d90102868201581c94d8a45f5dcb607fd7e80f35c54f0fe715617c2b227a6ef3fe2cd29c8201581cb17421d5ce4a3d560bb63
3ad30acdd6163fa2871b491c6fd9b9530f88201581ce4bdabb3c99b4fb90cf0ec100f97f88a227397e55c24b6c4199a4f3d8200581c51116b40101e98009a5bd59d0b1ff674caa77c86a941e770cab683258200581ca2fb6e859d5578c4ae1295c762945ebe1813ee9d179ad32b561daa288200581cc5ae5fddbc3b22bcab4877d2f21931f64d8fa62d09985fbd4def2747a38201581c3d720ffca2cf5b44e56635a244bda5dd56169514ab3a869d02d64901048201581c8a403437be132e4886f34c283019ecb16c1a815f9df06e9984b412b6078201581ca5ea52c14f8ee1df51e4767f18403b43637e6a48af708ad4801dea2a05d81e821a10e4da871a3b9aca00827168747470733a2f2f71586b6a4b2e636f6d5820bc4775aa024a9fd2b5144df6dd571227acb8fb48bad02a3052e6b1727abdc957161a000ad8a5a500d90102828258209420d30d33a91c38b12e182c18d3d71b70bfe6f0fc9ee469f149c5674d6ebb6758404aa06a473bf8b87c2513f79517eb3eea0e05d7cb792849378de30d1efca861b5025d371503dac31564441f21d4ae745e95e764d266784f4fd03a745ddec95faf825820d360b77dfcaaef0dcab3b34daff2244596ec880d69ea5851cbdf7cacedc001c6584075ccc220507bc2875d1f3d5cedbe91f652318114156f5135a3705ee2ba1b69ec7240f92083425ae12cbe8b42bc2d9ac182955e88049fe5c58440716adc29c70202d901028384582058be7b50fa126788e00838b978a29b8d7f8fef43e362e13f2f01d643ea07e3eb584010953421223fc94e76512d1bde8c700253bc173fa7af54a39083a28e5f1e734351f938b9ae6906619fbea1f822b362fa2556c7582fa5e1213dc4795ddbb46efb45aa2bb4f2de44f2299893845820e053694a0b079c4802e8f82a8b19593b94d6e5bef5039bd832dca9d4a2d61d0e584075e33d6a9e81eb2bba4f362d0ada92321a52f5863862f9ae46f37745a107f3a87e0996b00a3cb8c32fbd172b0de08e3850ad98a41d6eecd9cb3972a14ac604bb41f244a53db2e28458203f51938a3aa87342e604ea51f7083d3b18cbff5dbe4466470bde57d25705e1285840b0b9d3975f4b61f2026aae805651d1273ac3ebe416486daa9fde190ebc88b7dc570b5679d708e2a2da760e9f23bfafc2d0e421d63d7c4ff2ac7de05c8c0f3a36433b0d644520b234b7bb01d9010284820181830300808205181d8201838202828201828200581c2b7b3cf60c3bcd5d071e28435eded816da43b6997e6329bcdec000fe8200581c8ed64496958f677ef694274be1242f8c815e995d9ceeff5ae7cbdac28201838200581cb58f362102e7e09e7c6ad46efd31ba913eb3fd065f79bbfbcdee09298200581c7a9ff1a436cd02ede31fb2c309c770b4572e00a6b7f8c8ae66ba5a718200581c1d5fcdebda8aa964dd90fb3d3b0270bab1d81d3fd4730623a6ff06c08200581c1813e00aff7aa5591755947b6862657b8daa3c2e2b42cead028e1d048201848200581c168096caa78ee97bdba9a200bc70c2f498898f59f5e91c89b1b3036a8201808200581c786772ebd52e0df45a539f7d5d3adafdca012fb56e7e6bc72d02e0cf8202838200581c64bebce6baecd2975a83466f836b0f9aeea99c8ef5edf03656ead05b8200581c874c831b8c2649ccfd05a41e2f77030101a9597cd26ea48261e766758200581cd052512453b0b4f2cf3de6900a5db549d25c3df4d23749df85b509f7830303848201838200581cfdb66b8b705f4f476c44f21bf91d345bdb5ba820114bbddddeb5a09d8200581c44cffd194cf16136527839c3ec709577c3b5b861e5fecf6a39530f93830302828200581cb677f69e8003e9a5668401aafdd7924ca44372950b4cf1ab4bea55678200581c87f1c282adf7fe00ef4facbf2ebb9093be25ab8bcf0d57aeb69e1b0a820282830300808201818200581ccaf4eb9dc498e51f59d6d3e3f2c97d5569eb00483e246585d8e467a882028082018182028004d9010282d87b9f412a425e939fd87a9f04ffd87b9f4407db0ee5232421ff42893bd87a9f03ffd87c9f439457d10223ffffff9f40d87c9fd87e9f431c581123022242afa7ff22ffff05a582010182a3a2a2434fa8b80523414dd87e9f44c8ef56eb0041e705ffa22341ca2403a12300d8799f40ffd87b9fd87b9f44a6f6dac1417543e63cb542840bffa00200ffa22304238023d87a80821b3d68ec25abda1a6c1b5af03878f3fc7dbb820207829f2221ff821b61ac7df950697b951b49857b91882048f9820302829fa2a2032244fbcbb844424c6ca342011f2140212340d87c9f054020ffd87d9f43fd1165ff01ff821b7541b8e487e6d0511b310bb76c3fe37d4a82040382d87e9f2443946b76ff821b42ab6d8caa601c6f1b29af13ce9ae5578a8205028200821b7b367ef88023e1f41b4f661f897b18a580f5f6", @@ -3328,6 +3329,7 @@ "txId": 
"0439d1c4fa905197e34cf28a85f2f7206517ebabb6b655bf917a3bc0da15246c", "type": "Tx ConwayEra" }, + "lastSnapshotTime": null, "localTxs": [ { "cborHex": "84ae00d9010283825820196ba9949e9306379081b1acea0d4564ee11d093349016195f73fe5bb637ec0f058258203dfea73f0f18b6c16a75a8589a5dc9606769117bfda7a78341a85b1aada69a7e04825820a75cc5685e903f7f398a38588ff7ce28f952c49cb1476542ebf46485c955a51c0512d9010283825820227850c56522f09b117c211877575ba7136d1adc74bc74894294f009591e3fe203825820808c1c4095bf927c2f7d9188551f5b4d43d67573318eb3324d3d719586e5669401825820eeabc6c4f3c94f86933d3a4e0718f8cafb9eb23d551b5823d4a16baf8b44660703018010a300583920da83b55add9d6d2812de0f7ae33110428e130eadfeeee79699e3d3a669124b69506540995bc6a68b797a179cfad426b2ff7aaf31e9ae4165018200a1581c2db8410d969b6ad6b6969703c77ebf6c44061aa51c5d6ceba46557e2a14e801df6667c99864e37cb62c4d38f1b1ac85d2e1e1a1dba028201d8184140021a0008bb9a04d90102848a03581c49e6c5764e5258b548fd21bc23a4f2cf953380fac8a2f6b93174d86d5820c2af75296ef39a234b52f5fb6bdbc627a9d7d10eef696675a502ba6e88daa7141a000796611a000de3e8d81e821b01618ceac974c67f1b01bc16d674ec8000581de0cdc5fc248ee2472d2c9c1338fefbbe3875f324436e0a34da33c8f88dd901028083840003440000000550040000000200000000000000010000008202676639572e636f6d8400054400000003f6f68304581ca5dcfb29486df3810121d5106ac4fbca8251fb867273875eeb24c8620d82008201581cfa4871c4ab8ddfa6e3e55e81242a4d6b277ff6a90a7ff4dee00c5bef830e8201581c4f6940c0c3a24299d5b4a49394132b5efc832d9ec68794374aca86498201581cfb4802d55aaeca23807020614321edcd084747f592cde9e0d4ac471b05a6581df0bc5793f2aee853012e12bd2c3fe512801e2fdc9cdbb294bd761cf7011a000a59b8581de0e5d91b9b774eece323b19d2ac84b0a55f96976e73f27a6091c3eeda51a00034978581de0e7c4d45942558525db51a3ce7553f068aefdc0414b40235a141e6cc61959da581df1d73c4dd7033fcf581acacab6842fc59615df6d78ee24ce6b9ce02f261a00082b95581de117512fa90960e2cc39779e2a6e55f9f1c038843fa37f894f7b3a6b431a00035395581de16efee7c53549ce3d13c41a0f0623477fe2168abb97774df5ed17765d1a0004125008010ed9010285581c5a20d1a159a88c9f9cccc3e67a6647b903e91c39f0d098b5c6c68926581c5c236eb87b7715a3bc68d19d4a246c1d17ec14bbb37288bd10ce24ca581c7587fc2c2fecbf7f0ed838db1014effaeae542b21edd2e0ed1757b4c581cdf3536942252dba1841aa885a694337c77083d6b26631e818d51e09b581ce558aa60e00d87be9a5c80becd8a637166353700e388c309b38b381309a1581c007ff8c1cf3f8f3db37f6c9491ebb2d2181ee72e82fa939bca6086f1a147a1f94ff7a558611b4debc029bf40a76e07582017306cac4f3b09e43cb4b3d656da06cae052aba728ee94911c258fc17249fcf70f0115196a3d161a0008971ea500d90102858258201bae19151846f8355a5b9c8d5bdb1df3e95ff5f3aeacb0c1f49af1495b3f7fb9584071230d3ae7f92aca58dff0846798e32be4bef216f7fbfd66c0be61a1516c896f8ba930b678a8b173af941462085677a5160ab167f658a162ba19ab27fb8c564a825820277fa1f451fa32b60d042de1138168572ff2c40d552dece5412659cbe95ce73c5840ccb8d106f1f13e31324c30a5729872f6920720c2b569a45044bc363942ebe5d8748ab76dfef802da7433c846532b7496e05e8fa6f1a60d1017ca6478d351dc3d825820448f7a8fa63a0af21f58f524ab014d234a6df63ac3d734fb7b1c79ff3304ded25840128c45b33ccf741d949cc4f32555561419f66b73f99aa6a23a4791e44eefeaf3e870b9f2007923d11eee22cdb0f70caba34cfa83d71b564bad905a51fa95d746825820e81607484f4b6a4712e2d998f0ef1d27738580e78b3f7a1f156b9b7f2c502bd25840bba9b84b6e5f815434a4f336ec00104c49416afc97b480c64681f5ec1908c77409dd252b1ebd4a0821b232641db7ce1fe02bc04a470222211b1410fc0a12672c82582044920a993361e9c1fde4d3761f141799ba487ed8c0fc1be1ed617b5d33157b135840b0b779b8425660bd645ccd653c94abe14d41190c05f393cee4dfd3dc0ac9ceac6a2afc98960997c7bfbc318d6a3e5ba072aff496ebac965a9a7069181462fc0902d9010286845820bf7bd932af5105e9e1e79aadac8ca79a8beae1733000dd5cd964c95e4589df7b584
0991f280e664988f44f66ad7e79de90877aafa6248f470d575025d15a955b90e1ba164d8bd2e369278f63028d542764c5884d6f47b508c862af3762524bd4584b41f14084582046f8921a563e05453e1f11ed2d3f3c3e6a6bf062710541863b450b80088022595840ddefbe8106e10820d5be66e8d10e7b1b855afdae1915995f859e00e6b4fa039b8efe6b4658237bd2da163c6d20c848bd8fe6b81bc911321fd2df72d0d2f177e6449a38709344b9efc5478458204606f6ba77e7d9a592335c141eb315ee1865f38ff9232f9385809b8df1c01ede58409f14f3d99a5ee8beea42288e4729bc3adb047c0aee2baa2ee0f6daf1981ecc7fe149e35511e0fbdd0ed8b0e404ec8d3353e978505feb87237b8e36198649a9e145f36e6a63234550d1e69b8d84582065862a7ea8f029f23b4624692b8759400ac1701a5cd21ce003158b320daa3637584085ca8c189ff0ecc35765ff2db224030b1c81bc6f4da5f3399304a1dc33dd44d1edce20a7e3ddb5bdfd71f659d24145859524b90cde25680ee7f4d08e1798d66544460b054042ff008458205df7c454f767ccd5357e5289c46600ea8d52114b49657087e21253e88be361335840678c8aee15411e0cb9e0dc6ba87ace5b06937450291692b95cc80fa1f337b6457d891477189ed48ebe1b7026a00862a165748141004fce96e90ca9fb7d5e496141e443a09006845820c083a018d2a1927593e57cfcea4dab1c6fd066e8b28205e6a9d0f9415d8c8bed58401b14226f29de0d9a3bc8455bb223f67cbb8c3ed7d840bb087f11c5a5f37b2a8ef7fed424e8a5f218962e4b2f465a3ce275c968ad6274baf4b74e646c3dfe951842bff8449a297e6601d9010282820282830302848200581c9af7d4cddb922639e7d71f1357d1cc5caae5dcc14bdee2e31763d1488200581c908a706f2f13367bed7a73bdc1859ec2402b162d749d27bb2aff1df78200581c0510eaa0c1c50577f7ac0aa0734f3bf64c4baaaaedf828e7d85e09ba8202838200581cd36ab370a58ab00ffc1a08cc16b1153b90a5a00dfe20df8a9a5293b18200581c372f254526d3ca9ed4196e02b0e1e348c0fd0f1ddb0f59f4489209758200581c51be6d5d45161ec64d064023ae1114684ec50613c11e5377d332df0e82018082040606d90102814645010000260105a382020382446268946e821b2b8b62555cc4222d1b0d5f0584dc9d1300820208829f03a2a4441f2522264107200203004420f335da23a3412f2102449ed559b241e04043ed9993a1244309c1119fd87e9f0202ffd87b9f00428df72342c11bffffff821b6e2be54266ea1da11b1091bfc81b80f08b82030482d87b9f41c200a2809f4000000542571effa32124204044cb4b2f5444cfd1d187d87c9f440ff8882e443c9a5ed5ffff821b6213e49e3f9c80231b6bb631090ba11c44f5d90103a200a20181020d42c43801818202838200581c78947974be9f1a912600d1b3767d392b5a43333c9af64616ac7131b18202848201828200581c2ad464ffc162821f6d2191409e21a6aab82febd89303bd961e9c8fda8200581c4253bdffa25c130385f41012426c6a3ef20e41c8772e300138f7a9718202838200581c62589818f7cdac4907d3c27f65285d48bf2bbd6d1543e8c4a84d16818200581ce9e5adb7ff58b92c486cd6a2805842bea353af5703beb7d7f2d525e28200581c208cabd54d0e9f2635f777700ce0cca7ad18acd105734a018ee352da8201808201848200581cfb91022759fc3f557fc8e18aa032f783d874d8a4b6bac69c4a0ce3418200581c40509bd2ea1046b6f9c0f4500b0acf5136681e46959c16c64bacd58e8200581cc1db99e76bf45dc8bfa0a7759e9b305d1369bcf26af96c3af8d42fde8200581ceba17b8a1e206e0a5b1059b9325d375d7e3be3f134fe71ee91122c478200581c239ff92b0d7e9fcfbd0ff5214cd9ecf548d76fc39f60914da5f4ef2f", diff --git a/hydra-node/golden/ReasonablySized (NodeState (Tx ConwayEra)).json b/hydra-node/golden/ReasonablySized (NodeState (Tx ConwayEra)).json index 32b7ac9b1df..92426b04ed3 100644 --- a/hydra-node/golden/ReasonablySized (NodeState (Tx ConwayEra)).json +++ b/hydra-node/golden/ReasonablySized (NodeState (Tx ConwayEra)).json @@ -5808,6 +5808,7 @@ "txId": "3c84c50f41292fb55d597cbc708088216eb6f92280c81a762855383dac2ba2d5", "type": "Tx ConwayEra" }, + "lastSnapshotTime": null, "localTxs": [ { "cborHex": 
"84b200d9010281825820b7f195fe67f2ade715085aecd8abdf981735a9abcac13c326604ab7d17f6c73b030dd9010282825820a110365a072734cc9943a98fc53d7d13f40cc00fcbcbba1e2fcba670379af22806825820c0f677e4d8334690ee4a2065c324e1e3743e41426cf18cc802631a1aeca1dda80712d9010281825820784864c79fc96d22a29832d83d232ffed73be8328a62a5ff70e6476ca7a84bad020184a40058390073aad82f6916f266828197fc27ec10c067a29e5f5efcbf4d844419378f329603bf23d00e6b644108fb5db4a88658baa5d3fa6d5c56ea772b01821b528b898c5d8ecfd4a1581cb0c53e2bf180858da4b64eb5598c5615bba7d723d2b604a83b7f9165a141350102820058209a11da20e6c1fa1aa7215fc63e24a1f5c5370bd6183ec36717272ccd6f1ce79303d818458200820501a400583931bb5d2749e37abe6e48ff6d79853104d1d8f4b5657f3d9c566ccb65d2aa500638a311d8a41c9160eb63d20e2551c0e951c256e3bbde31c2ce018200a1581c105a8f1bb56444cacc86378c95421aceeb326b0fb7743e493eb82fd5a14137010282005820256070a426ed2aeaa29015ec66f249b70c23a9432a009b68214ed65999f8d39003d8184b8202484701000022200101a40058391051b3f49dec6d6cd0dff6af10a551dde4ce3e7a420f5473106da9c56f4c1395dc9205a27f18b51d1a62969e3f5d0c462e474a59e67006bb7201821b348d43810787f4eba1581cbdb2d8d1941467612f8dc408811eaee8159084545d3cf43c3be8449da158182de56c69c99aa80f91cdfc6f730b43805661684adb67cfad02028201d818410003d8184682008204181ea400585782d818584d83581c65b96d4df13e3389882889d56abbd8a0d3fa3ee7837d6883c1aa87d1a20158225820736f68616b6e7669776d746e7a7070627178796c6a767871787175766472767402451a98eb0e03021a42291304018200a1581c2d725128406dc832eb74c4709aca0512499b3c7b17e00d7cb2e6d1b1a147d67d3aeca4ef97010282005820fb1c47d7059124dbfc6f364605fb5c47394c58b5aa8591c90160d7c5f9f1c69603d8185902ac82008201838201838201828200581cc64582ffaf77667cc001beef9430e21d9f841de55ab946a5bc4c65db8200581ceeed05edd04b8363ce98e0e27afda73dcb6008c37215225aeba4df838202818200581c0bffcd3c69626b0b23e72d5f43c7bf40539a8c121e46395a7493dc888201818200581c0c2006c64f1852bf68f1394bbc73385660f8438a294ca4200e50b700830303848201838200581cdef89ebb9f70e6a1fc76b08256ba0c16fd79d3741a1d81b00753937d8200581cb22792ad1752b64bfe5aedabdfe0cb685287e47871e9e925318026b78200581c1970ec985c8fadfe0176a2d27688d2e3ddd9b71f2b22fbacaf328f87830301848200581c91106b966ace542bfb7bc7d68ee37f6541539730ff6116891eec398e8200581c3f068b85d5f8e23025501ba29c9d198b345a89fc806f30633e3a00428200581cf312a03ff39a235083173169c7092ec829400f7d01cd81757865f0dc8200581cf731c9094a688dddd93107b3deb30a8170d7b2dcdb62486594b496018201838200581cf24e5769ef0c0dfb537aeda7180e3a3a282d17cf69d47d87ea0a5f928200581c7f7237567be42900848a1457e5654582a166084b4cfddb8505e21e0d8200581c273b0ba1783cdb962d23aedac133f15163b89be3b2423e2fe43b6ac98202838200581cca6797312192dc7396253ce97ef670fa152f500b6e6f1e3ee4dde7a58200581c7761564819bd7a347ffe50eed10242e1e90083f9e73d0c25b9b61c408200581c863c28302e4528adc196f70eb47fb5e7b99f48113de8aab450bb2002820183830300808200581c3db15899be770ac8ad46e34aaab3e90512e8116d6ff287aaf2c66e918202828200581c306a460572e0adb4c63afda524106352c4aa037fb9fff64cfa23504b8200581c61a569eeb4dd0eccc84d35307b419815d442b1b4c01a29960efd300910a400581d61549a0f4c3f873f5355cd3ed0b16ff629e63ac54e6b57a85f76e8129001821b2279bdb0d95fc722a1581c14aa13103a2be13c42785ff9ce090b4f5bca1c5c6573d8c9fe23f2e4a1465fe59bac3c941b3a958bc759e14378028201d8185865d87a9f42c9419f44247d43e0a320402241552242140ba442b653446950570844bf87d9a120446edfe55d44745e2c2c0343bf879e44e92e0dcaff04a4d8799f22400444f604d6d4ffa08043ca802ad8799f412223ff435830d49f05443d6b2a70ff423ce7ff03d818584d820083030081820281830301828200581c755b1256069bdb30c2bd6d90485f31d549d5066155311bb3946f9f238200581cb9be3944a779daf787bbdb3fdf8b330aeb7324ce3c50711c5bf50ac3021a0003aeba030104d9010286830e8200
581c93c18f13b9e00bdaa0800f8e6ce2483deabcd899d369587d251ff5408200581c6d18ad55c01d9505a09fa1b1d1c1ebdae556d256ff3687955db78e39840c8201581cf43210ef4961c5419c7ea748aa90a56e63889d07aed004745547bbeb81021a000951fd84108200581ca7aa0d1c2c90b57d05870877d32feaa744a48380e4ce161b206cebfa1a0006d12ff68304581c4d9958d3adc6784bb8c1833286b14fed248da06b151e74f697ea516f098304581c69fbf697e3152f118bb0b8193718f6a3366ddbf6b7c65ce51349e0040883118200581c856a92b15621f46a1ae2fd7b98350d4a17671929120e6a0c9b697b811a00057c6705a1581de0062ab5316cb33504580c2e7becdfca25d8138603e54e2fdd277ca3221a0002e46d08010ed9010283581c6e3fcd7692702918f4171683cdd12feb99c32fe3d2cea5f9a163608d581ce04d32f1542034a028e2dd9dbc40109ea461c696bd8358a98face432581cfcbaefe145f5b077c2257aa96e746d27dea56d1c2d96270d5986290609a1581c4cd804c139d1d224f28528a9445f0f6dc5cd18a5f9bfebad11a543f2a141db1b42e1b9b61cae27570b5820fb9df7a1cef7d7a2b0017d2cf4d6e2374293648b5d2d9382f54a5a24e11bf1560758206ed73973080aa2bba02c811523b73bd7d80bad25d9a324253d5a1847bb04af710f0013a68201581c4969a2b0a3c0c754a865d5222830639d36db060b2d620af68a46e72ca48258204bf8e191bece8388d9c5580b5e6b63bc565009bb5753d0b35854796dc3dc494104820082783368747470733a2f2f734a58564746334638447a305139624f376a446c3533587171356a454247317366396c7654454a2e636f6d5820e9a1671be80c03500ec52d5f749cf17dac54afbcdabaf2db97743748989cbf2a825820843869ff14324ee02ac00fe6f3570f9bf698c8ab92594cac8564255d1b3ed8f507820182781d68747470733a2f2f38374d3276516f6a673446387465626c522e636f6d5820592f18e84282ee6169b83abdf147763717ae9fc1e781dc21f32ccf2ca1b1aa66825820855010286e1527a46bd0b3fc66f740ef5486702719f9175f7f28b714a2a7cd3405820082781a68747470733a2f2f4452794655393658686b666748612e636f6d582007550de3a274bf427f410b1a103a6c44005a901484e3c3f11b39a2ba42def3fb8258208d43f9151b9dd46c38fbdd48fcd3ffb91c50118f51173dd11c1527356018af3004820282781968747470733a2f2f67414c765778695349475755452e636f6d5820f691591f40cf7d65a43659bd7ab4937199e0efd5e549cb937047f33d11cf71d48201581c9f707a93c26e3ed64402d4b0522ebf65eadbaeb838d30f062d02776ba58258200139b28d10dde793ee09601d07469717e69ca87b6ad8e45a3fb88e33e90ed40508820182782368747470733a2f2f31636758462e6d5569304939776661634e6239457769382e636f6d582030faa04ce64b09d74e58fda4cc4fcfe09b14358dd534c700e26b1a25e9c957a18258200d4fab3bf0c2b8a20c598b2519c6e2cba74ce1d39eb4278625c6f2daaa8562eb00820182781968747470733a2f2f544141794a597371584a6a73722e636f6d5820199a75b1a78710046b11b993e5ef3678b7870997ec5900cd2ac1cd7fe06ff17982582018085dc14f6bcdd3fd24199733579124904d3fe5891f9905adde06d4ff672165038201f68258201c378dfd3fd24023e2f0eb2c41202b6f0eea319e26e2af101af57f8df05369ca038201f682582072b52a059251a84d68bce62e5ed5293e72701a0a53fb6a2038402356e573d5cb028202827468747470733a2f2f4756684373474e472e636f6d582020edbcd07105b064adc5fac3e61b5e44a17427f51573aa7d541223894a5ab6e28202581c37d01fddfe17f4bf48aca1de2258e5fbb84e42ab804800fc41361c34a2825820070c118ff1f51917870fee84d5d62e359c1fe7b45032b04541ef53d8750a276d088200f68258206e63096d6f74705e1732bdf2c5a1153647d6e467c45da4c0a0c43459dee03000048201f68204581c220de3544d48ac1d1c693a90a3597bb34046b9ef39cb3359fd5c2e73a282582042c24439b300c7375fc9479848d5c83d1325576f10009be0bd60c3631e3ced91068201827668747470733a2f2f4578464269554c4a656f2e636f6d582082172e198222093987e1ff4948d94c722bf26230262cbb4cf110c57f35cc27128258208619a4796fb91b6cdf0c1cbc928d655cfcebdfbbccbbc60b30bd7ba3a5a7f82407820182782168747470733a2f2f4e59716d745276634e454c69544f374d324e69386b2e636f6d58202e164373b9b85e6d8cc9fa25657e933155856fad7bf9e1bc5891573b66c028308204581c558270facd14d239701cdcc680befa572bbab80ce9dc897bb13403caa18258207e5534246905413aaf4518f18ac
32ce6f0b073a21e6edf20c7149e8b2d0ae74c03820082783c68747470733a2f2f503761544e7a797a4b2d302d4a6d536e6472616d395162706536526b6576662e6f5a515236642e2d37523651724c6c6c2e636f6d582052c3e4baf5676aeab3d5decc8173eb1dff1a40dbc78d8066c18644c2e94d3d198204581c686848d9fb5f10edd38f2ebe9628afdd64a797d54d759ac12343e326a18258208fef6f4326bc17035bc2f48e221c8b72244122b34b9cee66dceaa2caea24bf8603820082781c68747470733a2f2f6f4279312e31586d4a4248536b4c36522e636f6d58209a57c9c64be48b71fb1db7a10939dded7d81725940b1bc88bdf58780198dbd1a14d9010282841a0003eb44581de1594263fa244435e538705247c45ebbbf4d181a24c7de42a455a394fe850482582013705ff66e151a8eb85e04a5d4ca52d743950ab34c9cf2502f2b7edfeb2ba87004d90102828201581cb258b395f42c50481e14cc40ac299b6c64e9602972f5a18a36ccba918200581c2aac5345e3cd62c05552e7eb43e2be483f50ca7e904122e4cae02546a48201581ca5d7312cfc46363111a36b42d9629644f75e5a3279719098eefd41110d8200581c7ee6cbe534130e37bdada938550e863b828aef2e891c9e9549b54487088200581c9b824466dd982376ed914ea4d32b8e7ef71a1e1b93c3af174af7a90d098200581ce781b873b8b36e04f7a7f5f211cd1a64878582101603ba56f98364f50dd81e821b0000000460d840d51b00000005d21dba0082783b68747470733a2f2f6f6763344971383644534d5732696a47795037526e4670324e436b596d546a3634496c7a77706146616549354f2d342e636f6d58209d5dd65a8c03ce948b4da120701c6eca910a69f609a12b30bf427802e237c98d841a000e5624581de058570027c34ebd4b8bcc4a493450f3343e43e11432c48d09cb9db21483018258206e99e2dae3996a1d320b0ba3549ace0a89a3255f311e9ca0d906be6f97bb0bad0682070082782468747470733a2f2f6e534650355571696c767a574a6230384b494179545276652e636f6d5820cfcfc0ec9f6349bd3ddd4494cd476007b42da53438d13afdc4e2b14928ad0570161a0004f09ba402d901028384582026688f1aca216d83e6ca380e44cd2ce9aacc8ea404de208d1dfb9417f6500ed55840669aca4d8fad30658565f2d1cbb17c4df4ad918e53e71815169882b1682345c509ea2e8955b681c9588d3c6002105976e19703101f740484fcc98d5ad6ba29284311dbc544ecc0a8e584582005a0722c511058453e2d8c9f40d25b919db7b23719c255df750fff811e6ab34458402d51dad78bd37a5d854c3de0fc74caec7e89adb14f41966300b56080c0de05697356ef741bc47a06799516820f2766afba3f675ba4d69f0a6173919a95cf341540423d148458206b920977301914dd6de1c17c5d8c6e73f793ed5c8d9db9be651a7fa4efa3e5765840b2e92f477ee09e6c8a3472f888f808d13f90655f65bd227b19a51df619850ba08779b2ea4880f42648426d68f5a3c8baf769f2a8f0ccce87fb8c11ae9b7733764279cc415b01d9010285820184830300838200581c50f5ef9ec9e53eaa4d11ec0001ccddcd5588dc8fe08eb69f3ac4d9898200581c7cc13aa21e5ea0e8174bcbc9cfeac2f7c582a452c468676f51f4afee8200581c7ae6f61e4a31a226f3c835c8136f7afab74d9988b591dc61852f9bdf820183830301828200581cf14ba0aae696408d2ed4302b55427ed98ba172dc3cf9037d1d36b1c88200581c15d30ec6af692f8025fa6896b2bfef33b9da109757616c0f5526dfad8200581cf830ef981354822f3eefad05a4080f76d9f82775f9b92b751d5822768202838200581ceca28e6d02f6c6260763a6c78d7a5392e47801c713656a1c745e12c38200581c6918a425b6094b7aa59260837df771fcf0cb03d9c2118ee3f21d7d888200581cc418d19742c521a313af2ae3813e5527ff362d52f555cb1405f9a97a8202828202808201828200581c320f50e34726562f3278c0d2892dcab27831aa782637505f633b14f68200581cfc23784e930e8ddfbd69d2795a9005e0746c519d4115deaf91f8358e8202848201808202808202818200581c8ff13c9ad956c8fcf17abd643a8f73a184d9e6eb894c775280ddfda48201848200581ce9936849ab2ec2c1504a9b878f7c0b48c810dd93a0e1c9a115289e448200581c4c95d260f0801d2094e70f081ec1940f95e0e8db97344628954284148200581c9f4cafbb061e873d5f0d936ec258e8ff865a1cd435f1a784cd15ec248200581c6045fefac7c076e486641107f1e387400b9c5807ff70939662ed14268200581cfd53bc1fed3960e682b81b13911023bb848f0d6c3250da92c3c8b0f4820184830300808201828201828200581c5fe95198eb5b4ac951fb5eb002d9ebe8a763fab2442aa3983bd7f3df8200581cc39c
b07b702cdaa3f060a087b92a86496d572424d2c5e754cc2f3f948201818200581cb4bc87b972edfdccbfb1ca893076c1af105def9248b2cb162502c02d830300818201828200581cfc89044108dcc94402b47c88d0748ff44abf8ca634ab9608ce0429568200581c5cd367de5324816ab247fa38e8c81b96b2e945ef5ac5996fa88c405982028082018082050304d9010283d8799f80ffa2d87c9fd87d9f2341e40523ff9f4103448399232624426ea022ffff4001249f04425a1b00ff05a38201068201821b462ea677cc05774d1b2cbff2bc83fd79d68201088240821b76b12cfae2eb75451b3284bccf6fc55fb08205028221821b7132a892a12f25071b29f63ef1206873fcf5d90103a100a1016a4b2ee2809740f3be8685", diff --git a/hydra-node/golden/RunOptions.json b/hydra-node/golden/RunOptions.json index cccde9ca78c..5dc2ce63a25 100644 --- a/hydra-node/golden/RunOptions.json +++ b/hydra-node/golden/RunOptions.json @@ -1,25 +1,16 @@ { "samples": [ { - "advertise": { - "hostname": "0.0.43.126", - "port": 1984 - }, + "advertise": null, "apiHost": { - "ipv4": "0.0.70.192", + "ipv4": "0.0.50.29", "tag": "IPv4" }, - "apiPort": 18036, - "apiTransactionTimeout": 0, + "apiPort": 10269, + "apiTransactionTimeout": 37292, "chainConfig": { - "cardanoSigningKey": "a/c/b/c.sk", - "cardanoVerificationKeys": [ - "a/b/a.vk", - "b/c.vk", - "c.vk", - "b/b.vk", - "a/b/b.vk" - ], + "cardanoSigningKey": "c/b/c/c/a.sk", + "cardanoVerificationKeys": [], "chainBackendOptions": { "contents": { "networkId": { @@ -31,188 +22,184 @@ "tag": "Direct" }, "contestationPeriod": 604800, - "depositPeriod": 13568, + "depositPeriod": 34638, "hydraScriptsTxId": [ - "0308050108080605060300050607080801070606050403020608050103080807" + "0605070204050608020106030206010305050403030202040304070600000303", + "0801080100040606070202010504020006050100050008030302050804070604", + "0205060202000100020800080104000203050207030802050605030804050202", + "0407000403050607000804070002060202000105030801020403010003040201", + "0008070002040003060201060102060502040502030304080105060704020703", + "0002070608040604060807040104060405070602050601020604010507060705" ], "startChainFrom": { - "blockHash": "41bf3917fca4c554e76325211df0772261c057b1a73df05fa0ad980294837273", - "slot": 16016247, + "blockHash": "d30135e5f52ce44f9b5ec8bdd15e246581c23f669d7bc49a33dea621f6a18038", + "slot": 11150471, "tag": "ChainPoint" }, "tag": "CardanoChainConfig" }, - "hydraSigningKey": "b/c.sk", + "hydraSigningKey": "c/b/c/a/b.sk", "hydraVerificationKeys": [ - "b/a.vk", - "c/a/a.vk", - "b/c/b.vk", - "c/c.vk", - "b/a.vk", - "a/a/c.vk" + "c/c/c.vk" ], "ledgerConfig": { - "cardanoLedgerProtocolParametersFile": "c.json" + "cardanoLedgerProtocolParametersFile": "c/c/a.json" }, "listen": { - "hostname": "0.0.61.156", - "port": 18484 + "hostname": "0.0.61.219", + "port": 20130 }, - "monitoringPort": 14575, - "nodeId": "ghz", - "peers": [ - { - "hostname": "0.0.0.5", - "port": 3 - } - ], - "persistenceDir": "c/c/a", - "persistenceRotateAfter": null, - "tlsCertPath": null, - "tlsKeyPath": "c/b/a/a/b/a.key", + "monitoringPort": null, + "nodeId": "sorfkmmrgqigapitelhsuybe", + "peers": [], + "persistenceDir": "c/b", + "persistenceRotateAfter": 94784, + "snapshotBatchSize": 49, + "snapshotInterval": 4.979, + "tlsCertPath": "b/a/b.pem", + "tlsKeyPath": "b/c/a/b.key", "verbosity": { "contents": "HydraNode", "tag": "Verbose" }, - "whichEtcd": "EmbeddedEtcd" + "whichEtcd": "SystemEtcd" }, { "advertise": { - "hostname": "0.0.111.159", - "port": 21035 + "hostname": "0.0.85.47", + "port": 5341 }, "apiHost": { - "ipv4": "0.0.106.50", + "ipv4": "0.0.15.242", "tag": "IPv4" }, - "apiPort": 32191, - "apiTransactionTimeout": 27, + "apiPort": 4901, + 
"apiTransactionTimeout": 49949, "chainConfig": { - "initialUTxOFile": "b/b/c/a.json", - "ledgerGenesisFile": null, - "offlineHeadSeed": "605a4786e76db4b28c3668751db34615", - "tag": "OfflineChainConfig" + "cardanoSigningKey": "a/c/c.sk", + "cardanoVerificationKeys": [ + "a/b.vk", + "b/a/a.vk", + "c.vk", + "b/b/a.vk", + "b.vk" + ], + "chainBackendOptions": { + "contents": { + "projectPath": "blockfrost-project.txt", + "queryTimeout": 20, + "retryTimeout": 300 + }, + "tag": "Blockfrost" + }, + "contestationPeriod": 15799, + "depositPeriod": 30391, + "hydraScriptsTxId": [ + "0705010706040208010104010104010806040805050708000503050205050700", + "0406050000040003080805010205080807030402010507020800000103020001", + "0700020307070200050002020806010703060303030504000605010404060105" + ], + "startChainFrom": null, + "tag": "CardanoChainConfig" }, - "hydraSigningKey": "a/c.sk", + "hydraSigningKey": "b/c/b.sk", "hydraVerificationKeys": [ - "c.vk", - "a/b/a.vk", + "c/b/b.vk", "a.vk", - "b/a.vk", - "a/a/b.vk", - "b/b/c.vk" + "b/c.vk", + "c/a.vk", + "b/c.vk" ], "ledgerConfig": { - "cardanoLedgerProtocolParametersFile": "b/a/b/a/a/a.json" + "cardanoLedgerProtocolParametersFile": "c/c.json" }, "listen": { - "hostname": "0.0.15.242", - "port": 32379 + "hostname": "0.0.7.162", + "port": 14610 }, - "monitoringPort": 29896, - "nodeId": "elvahkvrhdyjq", + "monitoringPort": 10691, + "nodeId": "gqegwlhhvpstjqwxtc", "peers": [ + { + "hostname": "0.0.0.3", + "port": 7 + }, { "hostname": "0.0.0.5", "port": 1 }, { - "hostname": "0.0.0.1", - "port": 1 + "hostname": "0.0.0.6", + "port": 8 }, { - "hostname": "0.0.0.1", - "port": 4 + "hostname": "0.0.0.6", + "port": 6 + }, + { + "hostname": "0.0.0.8", + "port": 1 } ], - "persistenceDir": "c/c", - "persistenceRotateAfter": 30, - "tlsCertPath": null, - "tlsKeyPath": "b/a/b/a.key", + "persistenceDir": "a/a/b/a", + "persistenceRotateAfter": 97785, + "snapshotBatchSize": 88, + "snapshotInterval": 4.749, + "tlsCertPath": "b.pem", + "tlsKeyPath": "a/c/a/a.key", "verbosity": { - "contents": "HydraNode", - "tag": "Verbose" + "tag": "Quiet" }, "whichEtcd": "SystemEtcd" }, { - "advertise": { - "hostname": "0.0.56.147", - "port": 26421 - }, + "advertise": null, "apiHost": { - "ipv4": "0.0.31.46", + "ipv4": "0.0.122.4", "tag": "IPv4" }, - "apiPort": 20019, - "apiTransactionTimeout": 26, + "apiPort": 26115, + "apiTransactionTimeout": 24923, "chainConfig": { - "cardanoSigningKey": "c/b/a/b/c/b.sk", - "cardanoVerificationKeys": [ - "a/c/b.vk", - "b.vk", - "c/b/b.vk", - "b/c/b.vk", - "a/a/c.vk", - "c/b/b.vk" - ], - "chainBackendOptions": { - "contents": { - "networkId": { - "magic": 42, - "tag": "Testnet" - }, - "nodeSocket": "node.socket" - }, - "tag": "Direct" - }, - "contestationPeriod": 604800, - "depositPeriod": 7788, - "hydraScriptsTxId": [ - "0107030001030003010403010201000301040808010501040703060005070002", - "0507060005030307000407080303050200040008060007080107060404050001" - ], - "startChainFrom": { - "blockHash": "81667e2b53fa0fc89adae700530801ee0d1c65ba7a2e3b2babb9de600015194c", - "slot": 14063413, - "tag": "ChainPoint" - }, - "tag": "CardanoChainConfig" + "initialUTxOFile": "b/c/c/b/c.json", + "ledgerGenesisFile": null, + "offlineHeadSeed": "d0db6b134d159da8571bc4838114def8", + "tag": "OfflineChainConfig" }, - "hydraSigningKey": "a.sk", - "hydraVerificationKeys": [ - "a/b/c.vk", - "b/a/a.vk", - "b.vk", - "a.vk", - "a.vk" - ], + "hydraSigningKey": "b/b.sk", + "hydraVerificationKeys": [], "ledgerConfig": { - "cardanoLedgerProtocolParametersFile": "b/a/b/c/a.json" + 
"cardanoLedgerProtocolParametersFile": "c/c/b/c.json" }, "listen": { - "hostname": "0.0.18.217", - "port": 21720 + "hostname": "0.0.44.247", + "port": 29918 }, - "monitoringPort": 338, - "nodeId": "enjl", + "monitoringPort": 15712, + "nodeId": "gxklajzqnrcqyxchcnoiap", "peers": [ { - "hostname": "0.0.0.5", - "port": 4 + "hostname": "0.0.0.3", + "port": 1 }, { - "hostname": "0.0.0.6", - "port": 3 + "hostname": "0.0.0.3", + "port": 2 }, { - "hostname": "0.0.0.7", - "port": 7 + "hostname": "0.0.0.3", + "port": 4 + }, + { + "hostname": "0.0.0.0", + "port": 2 } ], - "persistenceDir": "c/c/b/c", - "persistenceRotateAfter": 16, - "tlsCertPath": "a/a/b/c/b/b.pem", + "persistenceDir": "c/b/a/b", + "persistenceRotateAfter": null, + "snapshotBatchSize": 50, + "snapshotInterval": 0.541, + "tlsCertPath": "c.pem", "tlsKeyPath": null, "verbosity": { "tag": "Quiet" @@ -220,113 +207,130 @@ "whichEtcd": "SystemEtcd" }, { - "advertise": null, + "advertise": { + "hostname": "0.0.89.193", + "port": 9386 + }, "apiHost": { - "ipv4": "0.0.25.188", + "ipv4": "0.0.98.245", "tag": "IPv4" }, - "apiPort": 9965, - "apiTransactionTimeout": 17, + "apiPort": 6626, + "apiTransactionTimeout": 42179, "chainConfig": { - "initialUTxOFile": "b/c/b/a/a/c.json", - "ledgerGenesisFile": "b/c/a.json", - "offlineHeadSeed": "329e98b0b3a9efc003e42da13950a34f", + "initialUTxOFile": "b/b/b/a.json", + "ledgerGenesisFile": "a/a/b.json", + "offlineHeadSeed": "8c05d6612ce8d36e51034c5483f04c5f", "tag": "OfflineChainConfig" }, - "hydraSigningKey": "b/c.sk", + "hydraSigningKey": "c/b.sk", "hydraVerificationKeys": [ - "b/a.vk" + "c/b.vk" ], "ledgerConfig": { - "cardanoLedgerProtocolParametersFile": "a/b/b/a/a/a.json" + "cardanoLedgerProtocolParametersFile": "c/a.json" }, "listen": { - "hostname": "0.0.106.207", - "port": 20072 + "hostname": "0.0.76.240", + "port": 16962 }, - "monitoringPort": 31045, - "nodeId": "csqnihmmawedbhcwwpna", - "peers": [], - "persistenceDir": "c/a", - "persistenceRotateAfter": 0, - "tlsCertPath": "a/b/c/b/b/a.pem", - "tlsKeyPath": "b/c/a/b.key", + "monitoringPort": null, + "nodeId": "fhdpmz", + "peers": [ + { + "hostname": "0.0.0.1", + "port": 0 + }, + { + "hostname": "0.0.0.6", + "port": 0 + }, + { + "hostname": "0.0.0.2", + "port": 8 + }, + { + "hostname": "0.0.0.0", + "port": 4 + }, + { + "hostname": "0.0.0.3", + "port": 6 + } + ], + "persistenceDir": "b", + "persistenceRotateAfter": null, + "snapshotBatchSize": 30, + "snapshotInterval": 1.165, + "tlsCertPath": "b/c/c.pem", + "tlsKeyPath": null, "verbosity": { "contents": "HydraNode", "tag": "Verbose" }, - "whichEtcd": "EmbeddedEtcd" + "whichEtcd": "SystemEtcd" }, { - "advertise": { - "hostname": "0.0.76.153", - "port": 10391 - }, + "advertise": null, "apiHost": { - "ipv4": "0.0.56.37", + "ipv4": "0.0.8.4", "tag": "IPv4" }, - "apiPort": 8054, - "apiTransactionTimeout": 23, + "apiPort": 27032, + "apiTransactionTimeout": 20839, "chainConfig": { - "cardanoSigningKey": "a/c/a.sk", - "cardanoVerificationKeys": [ - "a.vk", - "a/a.vk", - "a/a.vk", - "a/a.vk", - "b/b/b.vk" - ], - "chainBackendOptions": { - "contents": { - "networkId": { - "magic": 42, - "tag": "Testnet" - }, - "nodeSocket": "node.socket" - }, - "tag": "Direct" - }, - "contestationPeriod": 39741, - "depositPeriod": 1119, - "hydraScriptsTxId": [ - "0203060008010405020004080100080403020305050205080508050800000805", - "0508010600020500050401000706070602010301020103020501000701060004", - "0600000601040000050806020308060804060304000305050500070706000801" - ], - "startChainFrom": null, - "tag": 
"CardanoChainConfig" + "initialUTxOFile": "c/a/a.json", + "ledgerGenesisFile": "a/a/c.json", + "offlineHeadSeed": "d6d530751fab387e74480f62a5755c7e", + "tag": "OfflineChainConfig" }, - "hydraSigningKey": "b/c/c/b/a/b.sk", + "hydraSigningKey": "b/c.sk", "hydraVerificationKeys": [ - "b.vk" + "b.vk", + "a/a.vk", + "c.vk", + "b/b/c.vk", + "b/c/a.vk" ], "ledgerConfig": { - "cardanoLedgerProtocolParametersFile": "a/c/a/a/a/a.json" + "cardanoLedgerProtocolParametersFile": "b/a/a/a/a/c.json" }, "listen": { - "hostname": "0.0.8.4", - "port": 25176 + "hostname": "0.0.6.86", + "port": 951 }, - "monitoringPort": 13997, - "nodeId": "zlgbpdultsxtzonc", + "monitoringPort": 24185, + "nodeId": "suhmifrtfhhrbylxfsigd", "peers": [ { - "hostname": "0.0.0.5", - "port": 5 + "hostname": "0.0.0.7", + "port": 6 }, { - "hostname": "0.0.0.2", + "hostname": "0.0.0.6", "port": 2 + }, + { + "hostname": "0.0.0.3", + "port": 2 + }, + { + "hostname": "0.0.0.4", + "port": 5 + }, + { + "hostname": "0.0.0.7", + "port": 1 } ], - "persistenceDir": "b/a/a/a/a/c", - "persistenceRotateAfter": 13, - "tlsCertPath": "c/a.pem", - "tlsKeyPath": "c.key", + "persistenceDir": "b/a", + "persistenceRotateAfter": 25820, + "snapshotBatchSize": 37, + "snapshotInterval": 2.405, + "tlsCertPath": null, + "tlsKeyPath": "c/a/b/a.key", "verbosity": { - "contents": "HydraNode", - "tag": "Verbose" + "tag": "Quiet" }, "whichEtcd": "EmbeddedEtcd" } diff --git a/hydra-node/golden/StateChanged/Checkpoint.json b/hydra-node/golden/StateChanged/Checkpoint.json index 91eddd8098c..d78f319784a 100644 --- a/hydra-node/golden/StateChanged/Checkpoint.json +++ b/hydra-node/golden/StateChanged/Checkpoint.json @@ -98,6 +98,7 @@ "txId": "7699c82f91a758c5c9cd58fd4b29585586d718f4cc060a9cb0bd208fbbe4e932", "type": "Tx ConwayEra" }, + "lastSnapshotTime": null, "localTxs": [ { "cborHex": 
"84ac00d90102818258201c7acdbbb69fe78a5749d115468d5ef067ce3993a4ded931af0624e9709bcbdd000181a3005839013c9cf4f61017542cb588c19e84926ae2f71ff55bd591820e039a2098656d11eafc31f092555352d13cb046030f04b00273c2e9c1b79af4f4018200a1581c467f58932b54910584a0e8ea25a225e06a14530b2e96e938c53a3f22a143cd12700103d81858fe8200820283830301818201828200581c0ea94cd02ea4b3104da54d45c4dfcd1a9f652962087619793a90a68a8200581c5a048f7fee256484df29c9044cfb8b5577830f665d46084df5b939fe830300808202838201828200581ca022444d36c82368965500fcb17806c9d9cffc16f73983f76d8911ca8200581cc9b2eef8cb8aa46d97c2c49a4f5afd58bd813d1d936c1e3f7dfc676a830300818200581c4cd1e28bf1b2dca0f574e24eda3d26e34f500d1dbd5eb0ea1138439c830302828200581c94146d24d70a9a558a87e83cac69047a6136006294696860497131af8200581c54e2edda6bc348d3edbb6a792dd9d7b73e7a69e60bd02a4e904782f010825839112620160bc180821fc86a658d19c02a13bdd18db3a500569500595b97922390a7ab98fe5eb0fa165709d6006944bacfd69182b75e170d81668200a1581c78c0a6d1854e873848da7d1016c1cfca7c4ff21933dd1e850433fe63a153dc57802fd9f9faf3fc527736c1d2824bef58411b52a986a380b6da2e0219f87a030105a1581de015d83a89ed70bb5e6eb6b172b576b5bbf62fd2817658854e205f2c401a0006549e080109a1581c2e12c5e499e0521b13837391beed1248a2e36117370662ee75918b56a14e79cb08a4bc065767e081518fb6fc3b5f7623d0aa1b01110b5820a73deff8a89623cc2493822d01ced3168066beff264d55ca66259b227fab976d075820fca574672a6d5993be085898b5543889d538e284f9869950156e4e8007fd988a13a18200581c8298917b799e00c72d0d6be78bcb73a9c443908892cdee08ab5dde01a182582080e3fdf0141242bc9fb48d1790db8a5a3a8cdaf7dedd0d1783d372f75bcb275400820082783368747470733a2f2f334150637538675651732e67696176433044422d37624d674f6969336779347561616d474a627a2e636f6d582081094044fccfce7cc81509a5f06bc434e8bbe3265b27ee113d13a0ef77c07c3a161a0005bb36a300d9010281825820283d9201d8a07d59b2ac90884324f0fcd498a1d1288904b33751e0ba4730b3d05840ea21d3e35fa9cf525605e0dc70cc1d53c6cfcae8fae8252c1dec38697325815d6bc4fd92613debf95e6275b23450b4ab8f50c34d7fc5102a2b0b356feb7e7e2702d9010281845820f59f0e3e8b715413e797e896442686efe8e392596d5e6c0c96bcba0bfdb838b058401fdf5f0bfd0d98baa7717be1075499234c9e8fd8cf1732841802500839d8f71cfe71afdb659b9685e220c836b704e7128ac76ce0609911e4a16c67fdd4799e1e404005a18202008280821b03b913622ab759f51b66fe9e80c9e9ee71f5d90103a101818200581cb38fe0368bd1cbd6c8df3f6525773a1216387414b721edb4caf2e3c6", diff --git a/hydra-node/golden/StateChanged/SnapshotRequestDecided.json b/hydra-node/golden/StateChanged/SnapshotRequestDecided.json index 3a77422f899..cb122e80150 100644 --- a/hydra-node/golden/StateChanged/SnapshotRequestDecided.json +++ b/hydra-node/golden/StateChanged/SnapshotRequestDecided.json @@ -1,6 +1,7 @@ { "samples": [ { + "requestedAt": null, "snapshotNumber": 1, "tag": "SnapshotRequestDecided" } diff --git a/hydra-node/json-schemas/api.yaml b/hydra-node/json-schemas/api.yaml index 68b811e52bc..57495f54d3a 100644 --- a/hydra-node/json-schemas/api.yaml +++ b/hydra-node/json-schemas/api.yaml @@ -3001,6 +3001,8 @@ components: - contestationPeriod - depositPeriod - configuredPeers + - snapshotBatchSize + - snapshotInterval additionalProperties: false properties: party: @@ -3021,6 +3023,14 @@ components: $ref: "api.yaml#/components/schemas/DepositPeriod" configuredPeers: type: string + snapshotBatchSize: + type: integer + minimum: 1 + description: Number of transactions to accumulate before requesting a snapshot + snapshotInterval: + type: number + format: double + description: Maximum time interval (in seconds) between snapshots NetworkInfo: type: object @@ -3405,6 +3415,7 @@ components: - currentDepositTxId - decommitTx - 
version
+        - lastSnapshotTime
       properties:
         localUTxO:
           $ref: "api.yaml#/components/schemas/UTxO"
@@ -3432,6 +3443,12 @@
             - $ref: "api.yaml#/components/schemas/Transaction"
         version:
           $ref: "api.yaml#/components/schemas/SnapshotVersion"
+        lastSnapshotTime:
+          oneOf:
+            - type: "null"
+            - type: string
+              format: date-time
+          description: The UTC time when the last snapshot was requested

     Deposit:
       type: object
diff --git a/hydra-node/src/Hydra/HeadLogic.hs b/hydra-node/src/Hydra/HeadLogic.hs
index ab4c088c702..d092563a472 100644
--- a/hydra-node/src/Hydra/HeadLogic.hs
+++ b/hydra-node/src/Hydra/HeadLogic.hs
@@ -335,17 +335,21 @@ onOpenNetworkReqTx env ledger currentSlot st ttl tx =
         newState TxInvalid{headId, utxo = localUTxO, transaction = tx, validationError = err}

   maybeRequestSnapshot nextSn outcome =
-    if not snapshotInFlight && isLeader parameters party nextSn
-      then
-        outcome
-          -- XXX: This state update has no equivalence in the
-          -- spec. Do we really need to store that we have
-          -- requested a snapshot? If yes, should update spec.
-          <> newState SnapshotRequestDecided{snapshotNumber = nextSn}
-          <> cause (NetworkEffect $ ReqSn version nextSn (txId <$> localTxs') decommitTx currentDepositTxId)
-      else outcome
+    -- Batch-size based throttling: only request snapshot if we have accumulated
+    -- enough transactions (>= snapshotBatchSize). The time-based trigger is
+    -- handled separately in onOpenChainTick.
+    let batchReady = fromIntegral (length localTxs') >= snapshotBatchSize
+     in if not snapshotInFlight && isLeader parameters party nextSn && batchReady
+          then
+            outcome
+              -- XXX: This state update has no equivalence in the
+              -- spec. Do we really need to store that we have
+              -- requested a snapshot? If yes, should update spec.
+              <> newState SnapshotRequestDecided{snapshotNumber = nextSn, requestedAt = Nothing}
+              <> cause (NetworkEffect $ ReqSn version nextSn (txId <$> localTxs') decommitTx currentDepositTxId)
+          else outcome

-  Environment{party} = env
+  Environment{party, snapshotBatchSize} = env

   Ledger{applyTransactions} = ledger

@@ -693,7 +697,7 @@ onOpenNetworkAckSn Environment{party} pendingDeposits openState otherParty snaps
       if isLeader parameters party nextSn && not (null localTxs)
         then
           outcome
-            <> newState SnapshotRequestDecided{snapshotNumber = nextSn}
+            <> newState SnapshotRequestDecided{snapshotNumber = nextSn, requestedAt = Nothing}
             <> cause (NetworkEffect $ ReqSn version nextSn (txId <$> localTxs) decommitTx currentDepositTxId)
         else outcome

@@ -968,34 +972,56 @@ onChainTick env pendingDeposits chainTime =
 -- __Transition__: 'OpenState' → 'OpenState'
 --
 -- This is primarily used to track deposits and either drop them or request
--- snapshots for inclusion.
+-- snapshots for inclusion. It also handles time-based snapshot throttling.
 onOpenChainTick :: IsTx tx => Environment -> UTCTime -> PendingDeposits tx -> OpenState tx -> Outcome tx
 onOpenChainTick env chainTime pendingDeposits st =
-  -- Determine new active and new expired
-  let nextDeposits = determineNextDepositStatus env pendingDeposits chainTime
-      newActive = Map.filter (\Deposit{status} -> status == Active) nextDeposits
-      newExpired = Map.filter (\Deposit{status} -> status == Expired) nextDeposits
-   in -- Apply state changes and pick next active to request snapshot
-      -- XXX: This is smelly as we rely on Map <> to override entries (left
-      -- biased). This is also weird because we want to actually apply the state
-      -- change and also to determine the next active.
-      withNextActive (newActive <> newExpired <> pendingDeposits) $ \depositTxId ->
-        -- REVIEW: this is not really a wait, but discard?
-        -- TODO: Spec: wait tx𝜔 = ⊥ ∧ 𝑈𝛼 = ∅
-        if isNothing decommitTx
-          && isNothing currentDepositTxId
+  -- Time-based snapshot throttling: trigger snapshot if interval has passed
+  -- and there are pending transactions
+  maybeTimeBasedSnapshot
+    -- Determine new active and new expired
+    <> let nextDeposits = determineNextDepositStatus env pendingDeposits chainTime
+           newActive = Map.filter (\Deposit{status} -> status == Active) nextDeposits
+           newExpired = Map.filter (\Deposit{status} -> status == Expired) nextDeposits
+        in -- Apply state changes and pick next active to request snapshot
+           -- XXX: This is smelly as we rely on Map <> to override entries (left
+           -- biased). This is also weird because we want to actually apply the state
+           -- change and also to determine the next active.
+           withNextActive (newActive <> newExpired <> pendingDeposits) $ \depositTxId ->
+             -- REVIEW: this is not really a wait, but discard?
+             -- TODO: Spec: wait tx𝜔 = ⊥ ∧ 𝑈𝛼 = ∅
+             if isNothing decommitTx
+               && isNothing currentDepositTxId
+               && not snapshotInFlight
+               && isLeader parameters party nextSn
+               then
+                 -- XXX: This state update has no equivalence in the
+                 -- spec. Do we really need to store that we have
+                 -- requested a snapshot? If yes, should update spec.
+                 newState SnapshotRequestDecided{snapshotNumber = nextSn, requestedAt = Just chainTime}
+                 -- Spec: multicast (reqSn, 𝑣̂, 𝒮̄.𝑠 + 1, 𝒯̂, 𝑈𝛼, ⊥)
+                 <> cause (NetworkEffect $ ReqSn version nextSn (txId <$> localTxs) Nothing (Just depositTxId))
+               else
+                 noop
  where
+  -- Time-based snapshot triggering: request snapshot if enough time has passed
+  -- since the last snapshot AND there are pending transactions.
+  maybeTimeBasedSnapshot =
+    let hasPendingTxs = not (null localTxs)
+        intervalPassed = case lastSnapshotTime of
+          Nothing -> True -- No previous snapshot, allow triggering
+          Just lastTime -> diffUTCTime chainTime lastTime >= snapshotInterval
+     in if hasPendingTxs
+          && intervalPassed
           && not snapshotInFlight
           && isLeader parameters party nextSn
+          && isNothing decommitTx
+          && isNothing currentDepositTxId
         then
-          -- XXX: This state update has no equivalence in the
-          -- spec. Do we really need to store that we have
-          -- requested a snapshot? If yes, should update spec.
-          newState SnapshotRequestDecided{snapshotNumber = nextSn}
-          -- Spec: multicast (reqSn, 𝑣̂, 𝒮̄.𝑠 + 1, 𝒯̂, 𝑈𝛼, ⊥)
-          <> cause (NetworkEffect $ ReqSn version nextSn (txId <$> localTxs) Nothing (Just depositTxId))
+          newState SnapshotRequestDecided{snapshotNumber = nextSn, requestedAt = Just chainTime}
+          <> cause (NetworkEffect $ ReqSn version nextSn (txId <$> localTxs) decommitTx currentDepositTxId)
         else noop
-  where
+  -- Pending active deposits are selected in arrival order (FIFO).

 withNextActive ::
   forall tx.
(Eq (UTxOType tx), Monoid (UTxOType tx)) =>
   Map (TxIdType tx) (Deposit tx) ->
   (TxIdType tx -> Outcome tx) ->
   Outcome tx
 withNextActive deposits cont = do
@@ -1008,7 +1034,7 @@ onOpenChainTick env chainTime pendingDeposits st =

   nextSn = confirmedSn + 1

-  Environment{party} = env
+  Environment{party, snapshotInterval} = env

   CoordinatedHeadState
     { localTxs
@@ -1017,6 +1043,7 @@ onOpenChainTick env chainTime pendingDeposits st =
     , version
     , decommitTx
     , currentDepositTxId
+    , lastSnapshotTime
     } = coordinatedHeadState

   Snapshot{number = confirmedSn} = getSnapshot confirmedSnapshot
@@ -1558,6 +1585,7 @@ aggregate st = \case
             , currentDepositTxId = Nothing
             , decommitTx = Nothing
             , version = 0
+            , lastSnapshotTime = Nothing
             }
         , chainState
         , headId
@@ -1593,7 +1621,7 @@ aggregate st = \case
       where
        CoordinatedHeadState{localTxs} = coordinatedHeadState
     _otherState -> st
-  SnapshotRequestDecided{snapshotNumber} ->
+  SnapshotRequestDecided{snapshotNumber, requestedAt} ->
     case st of
      Open os@OpenState{coordinatedHeadState} ->
       Open
@@ -1605,10 +1633,12 @@ aggregate st = \case
                     { lastSeen = seenSnapshotNumber seenSnapshot
                     , requested = snapshotNumber
                     }
+              , -- Update lastSnapshotTime if requestedAt is provided (from time-based triggers)
+                lastSnapshotTime = requestedAt <|> lastSnapshotTime
               }
           }
       where
-        CoordinatedHeadState{seenSnapshot} = coordinatedHeadState
+        CoordinatedHeadState{seenSnapshot, lastSnapshotTime} = coordinatedHeadState
     _otherState -> st
   SnapshotRequested{snapshot, requestedTxIds, newLocalUTxO, newLocalTxs, newCurrentDepositTxId} ->
     case st of
diff --git a/hydra-node/src/Hydra/HeadLogic/Outcome.hs b/hydra-node/src/Hydra/HeadLogic/Outcome.hs
index b70cf5e0dd4..98b9a240562 100644
--- a/hydra-node/src/Hydra/HeadLogic/Outcome.hs
+++ b/hydra-node/src/Hydra/HeadLogic/Outcome.hs
@@ -86,7 +86,12 @@ data StateChanged tx
       , tx :: tx
       , newLocalUTxO :: UTxOType tx
       }
-  | SnapshotRequestDecided {snapshotNumber :: SnapshotNumber}
+  | SnapshotRequestDecided
+      { snapshotNumber :: SnapshotNumber
+      , requestedAt :: Maybe UTCTime
+      -- ^ Time when the snapshot was requested. Used for time-based throttling.
+      -- 'Nothing' if no wall-clock time was available (i.e. for non-tick triggers like ReqTx or AckSn).
+      }
   | -- | A snapshot was requested by some party.
     -- NOTE: We deliberately already include an updated local ledger state to
     -- not need a ledger to interpret this event.
@@ -167,7 +172,7 @@ genStateChanged env =
     , HeadOpened <$> arbitrary <*> arbitrary <*> arbitrary
     , TransactionReceived <$> arbitrary
     , TransactionAppliedToLocalUTxO <$> arbitrary <*> arbitrary <*> arbitrary
-    , SnapshotRequestDecided <$> arbitrary
+    , SnapshotRequestDecided <$> arbitrary <*> arbitrary
     , SnapshotRequested <$> arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*> arbitrary
     , PartySignedSnapshot <$> arbitrary <*> arbitrary <*> arbitrary
     , SnapshotConfirmed <$> arbitrary <*> arbitrary <*> arbitrary
diff --git a/hydra-node/src/Hydra/HeadLogic/State.hs b/hydra-node/src/Hydra/HeadLogic/State.hs
index b44291ca701..f86721b7f69 100644
--- a/hydra-node/src/Hydra/HeadLogic/State.hs
+++ b/hydra-node/src/Hydra/HeadLogic/State.hs
@@ -172,7 +172,7 @@ data CoordinatedHeadState tx = CoordinatedHeadState
   , confirmedSnapshot :: ConfirmedSnapshot tx
   -- ^ The latest confirmed snapshot. Spec: S̅
   , seenSnapshot :: SeenSnapshot tx
-  -- ^ Last seen snapshot and signatures accumulator. Spec: Û, ŝ and Σ̂
+  -- ^ Last seen snapshot and signatures accumulator. Spec: Û, ŝ and Σ̂
  , currentDepositTxId :: Maybe (TxIdType tx)
   -- ^ Current/next deposit to incrementally commit. Spec: Uα
   -- TODO: update in spec: Uα -> tx^#α
@@ -180,6 +180,10 @@ data CoordinatedHeadState tx = CoordinatedHeadState
   -- ^ Pending decommit transaction. Spec: txω
   , version :: SnapshotVersion
   -- ^ Last open state version as observed on chain. Spec: v̂
+  , lastSnapshotTime :: Maybe UTCTime
+  -- ^ Time when the last snapshot was requested. Used for time-based throttling
+  -- to ensure snapshots are requested at least every snapshotInterval when there
+  -- are pending transactions.
   }
   deriving stock (Generic)
diff --git a/hydra-node/src/Hydra/Node.hs b/hydra-node/src/Hydra/Node.hs
index 6bc17b7de43..81ab16564cd 100644
--- a/hydra-node/src/Hydra/Node.hs
+++ b/hydra-node/src/Hydra/Node.hs
@@ -78,6 +78,8 @@ initEnvironment options = do
       , contestationPeriod
       , depositPeriod
       , configuredPeers
+      , snapshotBatchSize
+      , snapshotInterval
       }
  where
   -- XXX: This is mostly a cardano-specific initialization step of loading
@@ -118,6 +120,8 @@ initEnvironment options = do
     , chainConfig
     , advertise
     , peers
+    , snapshotBatchSize
+    , snapshotInterval
     } = options

 -- | Checks that command line options match a given 'HeadState'. This function
diff --git a/hydra-node/src/Hydra/Node/Environment.hs b/hydra-node/src/Hydra/Node/Environment.hs
index 9c28a59707f..191fa684bcf 100644
--- a/hydra-node/src/Hydra/Node/Environment.hs
+++ b/hydra-node/src/Hydra/Node/Environment.hs
@@ -1,13 +1,19 @@
+{-# LANGUAGE RecordWildCards #-}
+
 module Hydra.Node.Environment where

 import Hydra.Prelude

+import Data.Aeson (object, withObject, (.!=), (.=), (.:), (.:?))
+import Data.Time (NominalDiffTime)
 import Hydra.Node.DepositPeriod (DepositPeriod)
 import Hydra.Tx.ContestationPeriod (ContestationPeriod)
 import Hydra.Tx.Crypto (HydraKey, SigningKey)
 import Hydra.Tx.HeadParameters (HeadParameters (..))
 import Hydra.Tx.OnChainId (OnChainId)
 import Hydra.Tx.Party (HasParty (..), Party)
+import Numeric.Natural (Natural)
+import Test.QuickCheck (choose)

 data Environment = Environment
   { party :: Party
@@ -22,12 +28,61 @@ data Environment = Environment
   , depositPeriod :: DepositPeriod
   , configuredPeers :: Text
   -- ^ Configured peers for the network layer, used for comparison on etcd errors.
+  , snapshotBatchSize :: Natural
+  -- ^ Number of transactions to accumulate before requesting a snapshot.
+  -- Default is 10. Set to 1 for legacy behavior (snapshot per transaction).
+  , snapshotInterval :: NominalDiffTime
+  -- ^ Maximum time interval between snapshots when there are pending transactions.
+  -- Default is 100ms. Snapshots are requested if this interval passes AND localTxs > 0.
} deriving stock (Generic, Show, Eq) - deriving anyclass (ToJSON, FromJSON) + +-- Custom JSON instances to handle NominalDiffTime serialization +instance ToJSON Environment where + toJSON Environment{..} = + object + [ "party" .= party + , "signingKey" .= signingKey + , "otherParties" .= otherParties + , "participants" .= participants + , "contestationPeriod" .= contestationPeriod + , "depositPeriod" .= depositPeriod + , "configuredPeers" .= configuredPeers + , "snapshotBatchSize" .= snapshotBatchSize + , "snapshotInterval" .= (realToFrac snapshotInterval :: Double) + ] + +instance FromJSON Environment where + parseJSON = withObject "Environment" $ \o -> do + party <- o .: "party" + signingKey <- o .: "signingKey" + otherParties <- o .: "otherParties" + participants <- o .: "participants" + contestationPeriod <- o .: "contestationPeriod" + depositPeriod <- o .: "depositPeriod" + configuredPeers <- o .: "configuredPeers" + snapshotBatchSize <- o .:? "snapshotBatchSize" .!= 10 + snapshotIntervalSeconds <- o .:? "snapshotInterval" .!= (0.1 :: Double) + let snapshotInterval = realToFrac snapshotIntervalSeconds + pure Environment{..} instance Arbitrary Environment where - arbitrary = genericArbitrary + arbitrary = do + party <- arbitrary + signingKey <- arbitrary + otherParties <- arbitrary + participants <- arbitrary + contestationPeriod <- arbitrary + depositPeriod <- arbitrary + configuredPeers <- arbitrary + -- snapshotBatchSize must be >= 1 per JSON schema + snapshotBatchSizeInt <- choose (1, 100) :: Gen Int + let snapshotBatchSize = fromIntegral snapshotBatchSizeInt + -- Generate snapshot interval values that roundtrip cleanly through Double + -- Use multiples of 0.0625 (1/16 second) which are exactly representable + snapshotIntervalUnits <- choose (1, 160) :: Gen Int -- 0.0625s to 10s + let snapshotInterval = realToFrac (fromIntegral snapshotIntervalUnits * 0.0625 :: Double) + pure Environment{..} shrink = genericShrink instance HasParty Environment where diff --git a/hydra-node/src/Hydra/Options.hs b/hydra-node/src/Hydra/Options.hs index 148f21e9052..0080ebf3c38 100644 --- a/hydra-node/src/Hydra/Options.hs +++ b/hydra-node/src/Hydra/Options.hs @@ -19,6 +19,7 @@ import Data.ByteString.Char8 qualified as BSC import Data.IP (IP (IPv4), toIPv4, toIPv4w) import Data.Text (unpack) import Data.Text qualified as T +import Data.Time (NominalDiffTime) import Data.Version (showVersion) import Hydra.Cardano.Api ( ChainPoint (..), @@ -47,6 +48,7 @@ import Options.Applicative ( Parser, ParserInfo, ParserResult (..), + ReadM, auto, command, completer, @@ -79,6 +81,8 @@ import Options.Applicative ( ) import Options.Applicative.Builder (str) import Options.Applicative.Help (vsep) +import Numeric (showFFloat) +import Numeric.Natural (Natural) import Test.QuickCheck (Positive (..), choose, elements, listOf, listOf1, oneof, vectorOf) data Command @@ -215,6 +219,12 @@ data RunOptions = RunOptions , ledgerConfig :: LedgerConfig , whichEtcd :: WhichEtcd , apiTransactionTimeout :: ApiTransactionTimeout + , snapshotBatchSize :: Natural + -- ^ Number of transactions to accumulate before requesting a snapshot. + -- Default is 10. Set to 1 for legacy behavior (snapshot per transaction). + , snapshotInterval :: NominalDiffTime + -- ^ Maximum time interval between snapshots when there are pending transactions. + -- Default is 100ms. Snapshots are requested if this interval passes AND localTxs > 0. 
} deriving stock (Eq, Show, Generic) deriving anyclass (ToJSON, FromJSON) @@ -251,6 +261,12 @@ instance Arbitrary RunOptions where ledgerConfig <- arbitrary whichEtcd <- arbitrary apiTransactionTimeout <- arbitrary + snapshotBatchSizeInt <- choose (1, 100) :: Gen Int + let snapshotBatchSize = fromIntegral snapshotBatchSizeInt + -- Generate snapshot interval values that roundtrip cleanly through Double + -- Use multiples of 0.0625 (1/16 second) which are exactly representable + snapshotIntervalUnits <- choose (1, 160) :: Gen Int -- 0.0625s to 10s + let snapshotInterval = realToFrac (fromIntegral snapshotIntervalUnits * 0.0625 :: Double) pure $ RunOptions { verbosity @@ -271,6 +287,8 @@ instance Arbitrary RunOptions where , ledgerConfig , whichEtcd , apiTransactionTimeout + , snapshotBatchSize + , snapshotInterval } shrink = genericShrink @@ -297,10 +315,20 @@ defaultRunOptions = , ledgerConfig = defaultLedgerConfig , whichEtcd = EmbeddedEtcd , apiTransactionTimeout = 300 + , snapshotBatchSize = defaultSnapshotBatchSize + , snapshotInterval = defaultSnapshotInterval } where localhost = IPv4 $ toIPv4 [127, 0, 0, 1] +-- | Default snapshot batch size (number of transactions before requesting snapshot). +defaultSnapshotBatchSize :: Natural +defaultSnapshotBatchSize = 10 + +-- | Default snapshot interval (100ms). +defaultSnapshotInterval :: NominalDiffTime +defaultSnapshotInterval = 0.1 + -- | Parser for running the cardano-node with all its 'RunOptions'. runOptionsParser :: Parser RunOptions runOptionsParser = @@ -323,6 +351,8 @@ runOptionsParser = <*> ledgerConfigParser <*> whichEtcdParser <*> apiTransactionTimeoutParser + <*> snapshotBatchSizeParser + <*> snapshotIntervalParser whichEtcdParser :: Parser WhichEtcd whichEtcdParser = @@ -815,6 +845,36 @@ apiTransactionTimeoutParser = \takes longer than this, it will be cancelled." ) +snapshotBatchSizeParser :: Parser Natural +snapshotBatchSizeParser = + option + auto + ( long "snapshot-batch-size" + <> metavar "NATURAL" + <> value defaultSnapshotBatchSize + <> showDefault + <> completer (listCompleter ["1", "5", "10", "20", "50"]) + <> help + "Number of transactions to accumulate before requesting a snapshot. \ + \Set to 1 for legacy behavior (snapshot per transaction). \ + \Higher values reduce snapshot overhead but increase latency." + ) + +snapshotIntervalParser :: Parser NominalDiffTime +snapshotIntervalParser = + option + (realToFrac <$> (auto :: ReadM Double)) + ( long "snapshot-interval" + <> metavar "SECONDS" + <> value defaultSnapshotInterval + <> showDefault + <> completer (listCompleter ["0.05", "0.1", "0.2", "0.5", "1.0"]) + <> help + "Maximum time interval (in seconds) between snapshots when there are pending transactions. \ + \Snapshots are requested if this interval passes AND there is at least one pending transaction. \ + \Combined with --snapshot-batch-size, this provides hybrid throttling." 
+ ) + startChainFromParser :: Parser ChainPoint startChainFromParser = option @@ -1031,6 +1091,8 @@ toArgs , ledgerConfig , whichEtcd , apiTransactionTimeout + , snapshotBatchSize + , snapshotInterval } = isVerbose verbosity <> ["--node-id", unpack nId] @@ -1050,9 +1112,16 @@ toArgs <> argsChainConfig chainConfig <> argsLedgerConfig <> ["--api-transaction-timeout", show apiTransactionTimeout] + <> ["--snapshot-batch-size", show snapshotBatchSize] + <> ["--snapshot-interval", showSnapshotInterval snapshotInterval] where (NodeId nId) = nodeId + -- | Show snapshot interval with full precision for correct roundtrip parsing + showSnapshotInterval :: NominalDiffTime -> String + showSnapshotInterval t = + showFFloat Nothing (realToFrac t :: Double) "" + toWhichEtcd = \case SystemEtcd -> ["--use-system-etcd"] EmbeddedEtcd -> [] diff --git a/hydra-node/test/Hydra/BehaviorSpec.hs b/hydra-node/test/Hydra/BehaviorSpec.hs index 1f0e874b060..e1e7d88e39d 100644 --- a/hydra-node/test/Hydra/BehaviorSpec.hs +++ b/hydra-node/test/Hydra/BehaviorSpec.hs @@ -1362,6 +1362,8 @@ createHydraNode tracer ledger chainState signingKey otherParties outputs message , participants , depositPeriod = dp , configuredPeers = "" + , snapshotBatchSize = 1 + , snapshotInterval = 0.1 } party = deriveParty signingKey diff --git a/hydra-node/test/Hydra/HeadLogicSnapshotSpec.hs b/hydra-node/test/Hydra/HeadLogicSnapshotSpec.hs index 5c9fedab19a..efeb245e8bc 100644 --- a/hydra-node/test/Hydra/HeadLogicSnapshotSpec.hs +++ b/hydra-node/test/Hydra/HeadLogicSnapshotSpec.hs @@ -49,6 +49,8 @@ spec = do , depositPeriod = defaultDepositPeriod , participants = deriveOnChainId <$> threeParties , configuredPeers = "" + , snapshotBatchSize = 1 + , snapshotInterval = 0.1 } let coordinatedHeadState = @@ -61,6 +63,7 @@ spec = do , currentDepositTxId = Nothing , decommitTx = Nothing , version = 0 + , lastSnapshotTime = Nothing } let sendReqSn :: Effect tx -> Bool sendReqSn = \case @@ -196,6 +199,8 @@ prop_singleMemberHeadAlwaysSnapshotOnReqTx sn = monadicST $ do , depositPeriod = defaultDepositPeriod , participants = [deriveOnChainId party] , configuredPeers = "" + , snapshotBatchSize = 1 + , snapshotInterval = 0.1 } st = CoordinatedHeadState @@ -207,6 +212,7 @@ prop_singleMemberHeadAlwaysSnapshotOnReqTx sn = monadicST $ do , currentDepositTxId = Nothing , decommitTx = Nothing , version + , lastSnapshotTime = Nothing } outcome = update aliceEnv simpleLedger (inOpenState' [alice] st) $ receiveMessage $ ReqTx tx Snapshot{number = confirmedSn} = getSnapshot sn diff --git a/hydra-node/test/Hydra/HeadLogicSpec.hs b/hydra-node/test/Hydra/HeadLogicSpec.hs index 6f698e49d2d..976efaf485a 100644 --- a/hydra-node/test/Hydra/HeadLogicSpec.hs +++ b/hydra-node/test/Hydra/HeadLogicSpec.hs @@ -71,6 +71,8 @@ spec = , depositPeriod = defaultDepositPeriod , participants = deriveOnChainId <$> threeParties , configuredPeers = "" + , snapshotBatchSize = 1 + , snapshotInterval = 0.1 } aliceEnv = Environment @@ -81,6 +83,8 @@ spec = , depositPeriod = defaultDepositPeriod , participants = deriveOnChainId <$> threeParties , configuredPeers = "" + , snapshotBatchSize = 1 + , snapshotInterval = 0.1 } describe "Coordinated Head Protocol" $ do @@ -96,6 +100,7 @@ spec = , currentDepositTxId = Nothing , decommitTx = Nothing , version = 0 + , lastSnapshotTime = Nothing } it "reports if a requested tx is expired" $ do @@ -962,6 +967,7 @@ spec = , currentDepositTxId = Nothing , decommitTx = Nothing , version = 0 + , lastSnapshotTime = Nothing } , chainState = 
ChainStateAt{spendableUTxO = mempty, recordedAt = Just $ ChainPoint 0 blockHash}
             , headId = testHeadId
@@ -1058,6 +1064,7 @@ spec =
                     , currentDepositTxId = Nothing
                     , decommitTx = Nothing
                     , version = 0
+                    , lastSnapshotTime = Nothing
                     }
                 , chainState = Prelude.error "should not be used"
                 , headId = testHeadId
@@ -1099,6 +1106,7 @@ spec =
                     , currentDepositTxId = Nothing
                     , decommitTx = Nothing
                     , version = 0
+                    , lastSnapshotTime = Nothing
                     }
                 , chainState = Prelude.error "should not be used"
                 , headId = testHeadId
@@ -1269,6 +1277,7 @@ inOpenState parties =
         , currentDepositTxId = Nothing
         , decommitTx = Nothing
         , version = 0
+        , lastSnapshotTime = Nothing
         }
  where
   u0 = mempty
diff --git a/hydra-node/test/Hydra/NodeSpec.hs b/hydra-node/test/Hydra/NodeSpec.hs
index e6b76d4b17c..5c9bfa1dd64 100644
--- a/hydra-node/test/Hydra/NodeSpec.hs
+++ b/hydra-node/test/Hydra/NodeSpec.hs
@@ -301,6 +301,8 @@ spec = parallel $ do
           , depositPeriod = defaultDepositPeriod
           , participants = error "should not be recorded in head state"
           , configuredPeers = ""
+          , snapshotBatchSize = 1
+          , snapshotInterval = 0.1
           }
       nodeState = inInitialState [alice, bob]

@@ -474,6 +476,8 @@ testHydraNode tracer signingKey otherParties contestationPeriod inputs = do
       , depositPeriod = defaultDepositPeriod
       , participants
       , configuredPeers = ""
+      , snapshotBatchSize = 1
+      , snapshotInterval = 0.1
       }
   party = deriveParty signingKey
diff --git a/hydra-node/testlib/Test/Hydra/Node/Fixture.hs b/hydra-node/testlib/Test/Hydra/Node/Fixture.hs
index 40c08ee8db1..1bd6786dc20 100644
--- a/hydra-node/testlib/Test/Hydra/Node/Fixture.hs
+++ b/hydra-node/testlib/Test/Hydra/Node/Fixture.hs
@@ -51,4 +51,6 @@ testEnvironment =
     , depositPeriod = DepositPeriod 20
     , participants = deriveOnChainId <$> [alice, bob, carol]
     , configuredPeers = ""
+    , snapshotBatchSize = 1 -- Legacy behavior: snapshot per transaction
+    , snapshotInterval = 0.1 -- 100ms default interval
     }

From 87dbe4743fc6d109bc73ae36176544c1db5f2bf4 Mon Sep 17 00:00:00 2001
From: Jack Chan
Date: Thu, 8 Jan 2026 15:05:31 +0800
Subject: [PATCH 10/11] style: fix fourmolu formatting for hybrid snapshot
 throttling

---
 hydra-node/src/Hydra/HeadLogic.hs        | 32 ++++++++++++------------
 hydra-node/src/Hydra/Node/Environment.hs |  4 +--
 hydra-node/src/Hydra/Options.hs          |  8 +++---
 3 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/hydra-node/src/Hydra/HeadLogic.hs b/hydra-node/src/Hydra/HeadLogic.hs
index d092563a472..3260560407b 100644
--- a/hydra-node/src/Hydra/HeadLogic.hs
+++ b/hydra-node/src/Hydra/HeadLogic.hs
@@ -987,28 +987,28 @@ onOpenChainTick env chainTime pendingDeposits st =
            -- biased). This is also weird because we want to actually apply the state
            -- change and also to determine the next active.
            withNextActive (newActive <> newExpired <> pendingDeposits) $ \depositTxId ->
-            -- REVIEW: this is not really a wait, but discard?
-            -- TODO: Spec: wait tx𝜔 = ⊥ ∧ 𝑈𝛼 = ∅
-            if isNothing decommitTx
-              && isNothing currentDepositTxId
-              && not snapshotInFlight
-              && isLeader parameters party nextSn
-              then
-                -- XXX: This state update has no equivalence in the
-                -- spec. Do we really need to store that we have
-                -- requested a snapshot? If yes, should update spec.
-                newState SnapshotRequestDecided{snapshotNumber = nextSn, requestedAt = Just chainTime}
-                -- Spec: multicast (reqSn, 𝑣̂, 𝒮̄.𝑠 + 1, 𝒯̂, 𝑈𝛼, ⊥)
-                <> cause (NetworkEffect $ ReqSn version nextSn (txId <$> localTxs) Nothing (Just depositTxId))
-              else
-                noop
+             -- REVIEW: this is not really a wait, but discard?
+             -- TODO: Spec: wait tx𝜔 = ⊥ ∧ 𝑈𝛼 = ∅
+             if isNothing decommitTx
+               && isNothing currentDepositTxId
+               && not snapshotInFlight
+               && isLeader parameters party nextSn
+               then
+                 -- XXX: This state update has no equivalence in the
+                 -- spec. Do we really need to store that we have
+                 -- requested a snapshot? If yes, should update spec.
+                 newState SnapshotRequestDecided{snapshotNumber = nextSn, requestedAt = Just chainTime}
+                 -- Spec: multicast (reqSn, 𝑣̂, 𝒮̄.𝑠 + 1, 𝒯̂, 𝑈𝛼, ⊥)
+                 <> cause (NetworkEffect $ ReqSn version nextSn (txId <$> localTxs) Nothing (Just depositTxId))
+               else
+                 noop
  where
   -- Time-based snapshot triggering: request snapshot if enough time has passed
   -- since the last snapshot AND there are pending transactions.
   maybeTimeBasedSnapshot =
     let hasPendingTxs = not (null localTxs)
         intervalPassed = case lastSnapshotTime of
-          Nothing -> True -- No previous snapshot, allow triggering
+          Nothing -> True  -- No previous snapshot, allow triggering
           Just lastTime -> diffUTCTime chainTime lastTime >= snapshotInterval
      in if hasPendingTxs
           && intervalPassed
diff --git a/hydra-node/src/Hydra/Node/Environment.hs b/hydra-node/src/Hydra/Node/Environment.hs
index 191fa684bcf..1949420b7a6 100644
--- a/hydra-node/src/Hydra/Node/Environment.hs
+++ b/hydra-node/src/Hydra/Node/Environment.hs
@@ -4,7 +4,7 @@ module Hydra.Node.Environment where

 import Hydra.Prelude

-import Data.Aeson (object, withObject, (.!=), (.=), (.:), (.:?))
+import Data.Aeson (object, withObject, (.!=), (.:), (.:?), (.=))
 import Data.Time (NominalDiffTime)
 import Hydra.Node.DepositPeriod (DepositPeriod)
 import Hydra.Tx.ContestationPeriod (ContestationPeriod)
@@ -80,7 +80,7 @@ instance Arbitrary Environment where
         let snapshotBatchSize = fromIntegral snapshotBatchSizeInt
         -- Generate snapshot interval values that roundtrip cleanly through Double
         -- Use multiples of 0.0625 (1/16 second) which are exactly representable
-        snapshotIntervalUnits <- choose (1, 160) :: Gen Int -- 0.0625s to 10s
+        snapshotIntervalUnits <- choose (1, 160) :: Gen Int  -- 0.0625s to 10s
         let snapshotInterval = realToFrac (fromIntegral snapshotIntervalUnits * 0.0625 :: Double)
         pure Environment{..}
     shrink = genericShrink
diff --git a/hydra-node/src/Hydra/Options.hs b/hydra-node/src/Hydra/Options.hs
index 0080ebf3c38..cd77f3c9931 100644
--- a/hydra-node/src/Hydra/Options.hs
+++ b/hydra-node/src/Hydra/Options.hs
@@ -44,6 +44,8 @@ import Hydra.Node.ApiTransactionTimeout (ApiTransactionTimeout (..))
 import Hydra.Node.DepositPeriod (DepositPeriod (..))
 import Hydra.Tx.ContestationPeriod (ContestationPeriod, fromNominalDiffTime)
 import Hydra.Tx.HeadId (HeadSeed)
+import Numeric (showFFloat)
+import Numeric.Natural (Natural)
 import Options.Applicative (
   Parser,
   ParserInfo,
@@ -81,8 +83,6 @@ import Options.Applicative (
  )
 import Options.Applicative.Builder (str)
 import Options.Applicative.Help (vsep)
-import Numeric (showFFloat)
-import Numeric.Natural (Natural)
 import Test.QuickCheck (Positive (..), choose, elements, listOf, listOf1, oneof, vectorOf)

 data Command
@@ -265,7 +265,7 @@ instance Arbitrary RunOptions where
         let snapshotBatchSize = fromIntegral snapshotBatchSizeInt
         -- Generate snapshot interval values that roundtrip cleanly through Double
         -- Use multiples of 0.0625 (1/16 second) which are exactly representable
-        snapshotIntervalUnits <- choose (1, 160) :: Gen Int -- 0.0625s to 10s
+        snapshotIntervalUnits <- choose (1, 160) :: Gen Int  -- 0.0625s to 10s
         let snapshotInterval = realToFrac (fromIntegral snapshotIntervalUnits * 0.0625 ::
Double) pure $ RunOptions @@ -1117,7 +1117,7 @@ toArgs where (NodeId nId) = nodeId - -- | Show snapshot interval with full precision for correct roundtrip parsing + -- \| Show snapshot interval with full precision for correct roundtrip parsing showSnapshotInterval :: NominalDiffTime -> String showSnapshotInterval t = showFFloat Nothing (realToFrac t :: Double) "" From 8c12ef2e1c479b0f179288e332edcdd9e560559f Mon Sep 17 00:00:00 2001 From: Jack Chan Date: Thu, 8 Jan 2026 15:33:34 +0800 Subject: [PATCH 11/11] fix: add snapshotBatchSize and snapshotInterval to hydra-cluster RunOptions --- hydra-cluster/src/HydraNode.hs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hydra-cluster/src/HydraNode.hs b/hydra-cluster/src/HydraNode.hs index 958cd60535b..f839122517b 100644 --- a/hydra-cluster/src/HydraNode.hs +++ b/hydra-cluster/src/HydraNode.hs @@ -26,7 +26,7 @@ import Hydra.HeadLogic.State (SeenSnapshot) import Hydra.Logging (Tracer, Verbosity (..), traceWith) import Hydra.Network (Host (Host), NodeId (NodeId), WhichEtcd (EmbeddedEtcd)) import Hydra.Network qualified as Network -import Hydra.Options (BlockfrostOptions (..), CardanoChainConfig (..), ChainBackendOptions (..), ChainConfig (..), DirectOptions (..), LedgerConfig (..), RunOptions (..), defaultBFQueryTimeout, defaultCardanoChainConfig, defaultDirectOptions, nodeSocket, toArgs) +import Hydra.Options (BlockfrostOptions (..), CardanoChainConfig (..), ChainBackendOptions (..), ChainConfig (..), DirectOptions (..), LedgerConfig (..), RunOptions (..), defaultBFQueryTimeout, defaultCardanoChainConfig, defaultDirectOptions, defaultSnapshotBatchSize, defaultSnapshotInterval, nodeSocket, toArgs) import Hydra.Tx (ConfirmedSnapshot) import Hydra.Tx.ContestationPeriod (ContestationPeriod) import Hydra.Tx.Crypto (HydraKey) @@ -423,6 +423,8 @@ prepareHydraNode chainConfig workDir hydraNodeId hydraSKey hydraVKeys allNodeIds { cardanoLedgerProtocolParametersFile } , apiTransactionTimeout = 100000 + , snapshotBatchSize = defaultSnapshotBatchSize + , snapshotInterval = defaultSnapshotInterval } where port = fromIntegral $ 5_000 + hydraNodeId
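
Illustration (not part of the patches): how the two throttling triggers of
this series combine. This is a minimal sketch under stated assumptions:
'SnapshotTrigger' and 'shouldRequestSnapshot' are hypothetical names, and the
snapshotInFlight, leadership, decommit and deposit guards that the real
'maybeRequestSnapshot' (batch trigger, on ReqTx) and 'maybeTimeBasedSnapshot'
(interval trigger, on chain tick) also check are omitted here.

  import Data.Time (NominalDiffTime, UTCTime, diffUTCTime, getCurrentTime)
  import Numeric.Natural (Natural)

  -- Why a leader would decide to request a snapshot (hypothetical type).
  data SnapshotTrigger = BatchFull | IntervalElapsed
    deriving (Eq, Show)

  -- Hybrid throttling decision: the batch trigger fires once enough local
  -- transactions have accumulated; the interval trigger fires when the
  -- configured interval has elapsed since the last request and at least
  -- one transaction is pending.
  shouldRequestSnapshot ::
    Natural ->         -- corresponds to --snapshot-batch-size (default 10)
    NominalDiffTime -> -- corresponds to --snapshot-interval (default 0.1s)
    Int ->             -- number of pending local transactions
    Maybe UTCTime ->   -- lastSnapshotTime, if any
    UTCTime ->         -- current chain tick time
    Maybe SnapshotTrigger
  shouldRequestSnapshot batchSize interval pendingTxs lastRequested now
    | pendingTxs >= fromIntegral batchSize = Just BatchFull
    | pendingTxs > 0 && intervalElapsed = Just IntervalElapsed
    | otherwise = Nothing
   where
    intervalElapsed = case lastRequested of
      Nothing -> True -- no previous request recorded, allow triggering
      Just t -> diffUTCTime now t >= interval

  main :: IO ()
  main = do
    now <- getCurrentTime
    -- With the defaults, 12 pending transactions trip the batch trigger.
    print (shouldRequestSnapshot 10 0.1 12 Nothing now)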
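
Usage (flag values below are arbitrary examples; the other required
hydra-node options are elided):

  hydra-node ... --snapshot-batch-size 20 --snapshot-interval 0.5

Setting --snapshot-batch-size 1 keeps the pre-series behavior of one
snapshot request per transaction, which is what the test fixtures above pin
with snapshotBatchSize = 1.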