From 74da78363938b142757230495770b88bb4596291 Mon Sep 17 00:00:00 2001 From: ffranr Date: Wed, 10 Sep 2025 23:24:54 +0100 Subject: [PATCH 01/23] tapdb: migrate supply tables to use 32-byte group keys Updates the supply commit tables to store group keys in canonical 32-byte x-only form as defined in BIP340 (schnorr.SerializePubKey) instead of the previous 33-byte tweaked format. --- tapdb/migrations.go | 2 +- ...ables_schnorr_serialize_group_key.down.sql | 288 +++++++++++++++++ ..._tables_schnorr_serialize_group_key.up.sql | 300 ++++++++++++++++++ tapdb/sqlc/schemas/generated_schema.sql | 89 +++--- 4 files changed, 641 insertions(+), 38 deletions(-) create mode 100644 tapdb/sqlc/migrations/000047_supply_tables_schnorr_serialize_group_key.down.sql create mode 100644 tapdb/sqlc/migrations/000047_supply_tables_schnorr_serialize_group_key.up.sql diff --git a/tapdb/migrations.go b/tapdb/migrations.go index 6c3f948b6..12cb6ed76 100644 --- a/tapdb/migrations.go +++ b/tapdb/migrations.go @@ -24,7 +24,7 @@ const ( // daemon. // // NOTE: This MUST be updated when a new migration is added. - LatestMigrationVersion = 46 + LatestMigrationVersion = 47 ) // DatabaseBackend is an interface that contains all methods our different diff --git a/tapdb/sqlc/migrations/000047_supply_tables_schnorr_serialize_group_key.down.sql b/tapdb/sqlc/migrations/000047_supply_tables_schnorr_serialize_group_key.down.sql new file mode 100644 index 000000000..7da8556b8 --- /dev/null +++ b/tapdb/sqlc/migrations/000047_supply_tables_schnorr_serialize_group_key.down.sql @@ -0,0 +1,288 @@ +------------------------------------------------------------------------------- +-- Down migration: revert 32-byte group keys back to 33-byte where applicable. +-- Drop all relevant dependants and tables first (indexes, then tables). +------------------------------------------------------------------------------- + +-- universe_supply_leaves. +DROP INDEX IF EXISTS universe_supply_leaves_supply_root_id_type_idx; +DROP TABLE IF EXISTS universe_supply_leaves; + +-- universe_supply_roots. +DROP INDEX IF EXISTS universe_supply_roots_group_key_idx; +DROP TABLE IF EXISTS universe_supply_roots; + +-- supply_update_events. +DROP INDEX IF EXISTS supply_update_events_transition_id_idx; +DROP TABLE IF EXISTS supply_update_events; + +-- supply_syncer_push_log. +DROP INDEX IF EXISTS supply_syncer_push_log_group_key_idx; +DROP INDEX IF EXISTS supply_syncer_push_log_server_address_idx; +DROP TABLE IF EXISTS supply_syncer_push_log; + +-- supply_commit_transitions. +DROP INDEX IF EXISTS supply_commit_transitions_single_pending_idx; +DROP INDEX IF EXISTS supply_commit_transitions_state_machine_group_key_idx; +DROP TABLE IF EXISTS supply_commit_transitions; + +-- supply_commit_state_machines. +DROP TABLE IF EXISTS supply_commit_state_machines; + +-- mint_supply_pre_commits. +DROP INDEX IF EXISTS mint_anchor_uni_commitments_outpoint_idx; +DROP INDEX IF EXISTS mint_anchor_uni_commitments_unique; +DROP TABLE IF EXISTS mint_supply_pre_commits; + +-- supply_pre_commits. +DROP INDEX IF EXISTS supply_pre_commits_idx_group_key; +DROP INDEX IF EXISTS supply_pre_commits_unique_outpoint; +DROP TABLE IF EXISTS supply_pre_commits; + +-- supply_commitments. 
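+-- Dropped after the two pre-commit tables above, since both reference
+-- supply_commitments(commit_id) via their spent_by columns.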
+DROP INDEX IF EXISTS supply_commitments_chain_txn_id_idx; +DROP INDEX IF EXISTS supply_commitments_group_key_idx; +DROP INDEX IF EXISTS supply_commitments_outpoint_uk; +DROP INDEX IF EXISTS supply_commitments_spent_commitment_idx; +DROP TABLE IF EXISTS supply_commitments; + +------------------------------------------------------------------------------- +-- Recreate tables and indexes with 33-byte group_key where they were 32-byte. +------------------------------------------------------------------------------- + +-- Recreate universe_supply_roots with original 33-byte group_key. +CREATE TABLE universe_supply_roots ( + id INTEGER PRIMARY KEY, + + -- The namespace root of the MS-SMT representing this supply tree. + -- We set the foreign key constraint evaluation to be deferred until after + -- the database transaction ends. Otherwise, if the root of the SMT is + -- deleted temporarily before inserting a new root, then this constraint + -- is violated. + namespace_root VARCHAR UNIQUE NOT NULL REFERENCES mssmt_roots(namespace) DEFERRABLE INITIALLY DEFERRED, + + -- The tweaked group key identifying the asset group this supply tree belongs to. + group_key BLOB UNIQUE NOT NULL CHECK(length(group_key) = 33) +); + +-- Recreate the index. +CREATE INDEX universe_supply_roots_group_key_idx ON universe_supply_roots(group_key); + +-- Recreate dependant table and FK. +CREATE TABLE universe_supply_leaves ( + id INTEGER PRIMARY KEY, + + -- Reference to the root supply tree this leaf belongs to. + supply_root_id BIGINT NOT NULL REFERENCES universe_supply_roots(id) ON DELETE CASCADE, + + -- The type of sub-tree this leaf represents (mint_supply, burn, ignore). + sub_tree_type TEXT NOT NULL REFERENCES proof_types(proof_type), + + -- The key used for this leaf within the root supply tree's MS-SMT. + leaf_node_key BLOB NOT NULL, + + -- The namespace within mssmt_nodes where the actual sub-tree root node resides. + leaf_node_namespace VARCHAR NOT NULL +); + +CREATE UNIQUE INDEX universe_supply_leaves_supply_root_id_type_idx + ON universe_supply_leaves (supply_root_id, sub_tree_type); + +-- Recreate supply_syncer_push_log with original 33-byte group_key. +CREATE TABLE supply_syncer_push_log ( + id INTEGER PRIMARY KEY, + + -- The tweaked group key identifying the asset group this push log belongs to. + -- This should match the group_key format used in universe_supply_roots. + group_key BLOB NOT NULL CHECK(length(group_key) = 33), + + -- The highest block height among all supply leaves in this push. + max_pushed_block_height INTEGER NOT NULL, + + -- The server address (host:port) where the commitment was pushed. + server_address TEXT NOT NULL, + + -- The transaction ID (hash) of the supply commitment. + commit_txid BLOB NOT NULL CHECK(length(commit_txid) = 32), + + -- The supply commitment output index within the commitment transaction. + output_index INTEGER NOT NULL, + + -- The number of leaves included in this specific push. + num_leaves_pushed INTEGER NOT NULL, + + -- The timestamp when this push log entry was created (unix timestamp in seconds). + created_at BIGINT NOT NULL +); + +-- Recreate the indexes. +CREATE INDEX supply_syncer_push_log_group_key_idx ON supply_syncer_push_log(group_key); +CREATE INDEX supply_syncer_push_log_server_address_idx ON supply_syncer_push_log(server_address); + +-- Recreate supply_commitments with original 33-byte group_key. +CREATE TABLE supply_commitments ( + commit_id INTEGER PRIMARY KEY, + + -- The tweaked group key identifying the asset group this commitment belongs to. 
+ group_key BLOB NOT NULL CHECK(length(group_key) = 33), + + -- The chain transaction that included this commitment. + chain_txn_id BIGINT NOT NULL REFERENCES chain_txns(txn_id), + + -- The output index within the chain_txn_id transaction for the commitment. + output_index INTEGER, + + -- The internal key used for the commitment output. + internal_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + + -- The taproot output key used for the commitment output. + output_key BLOB NOT NULL CHECK(length(output_key) = 33), + + -- The block header of the block mining the commitment transaction. + block_header BLOB, + + -- The block height at which the commitment transaction was confirmed. + -- Can be NULL if the transaction is not yet confirmed. + block_height INTEGER, + + -- The merkle proof demonstrating the commitment's inclusion in the block. + merkle_proof BLOB, + + -- The root hash of the supply commitment at this snapshot. + supply_root_hash BLOB, + + -- The root sum of the supply commitment at this snapshot. + supply_root_sum BIGINT, + + spent_commitment BIGINT REFERENCES supply_commitments(commit_id) +); + +-- Recreate the indexes. +CREATE INDEX supply_commitments_chain_txn_id_idx ON supply_commitments(chain_txn_id); +CREATE INDEX supply_commitments_group_key_idx ON supply_commitments(group_key); +CREATE UNIQUE INDEX supply_commitments_outpoint_uk ON supply_commitments(chain_txn_id, output_index); +CREATE INDEX supply_commitments_spent_commitment_idx ON supply_commitments(spent_commitment); + +-- Recreate supply_commit_state_machines with original 33-byte group_key. +CREATE TABLE supply_commit_state_machines ( + -- The tweaked group key identifying the asset group's state machine. + group_key BLOB PRIMARY KEY CHECK(length(group_key) = 33), + + -- The current state of the state machine. + current_state_id INTEGER NOT NULL REFERENCES supply_commit_states(id), + + -- The latest successfully committed supply state on chain. + -- Can be NULL if no commitment has been made yet. + latest_commitment_id BIGINT REFERENCES supply_commitments(commit_id) +); + +-- Recreate supply_commit_transitions. +CREATE TABLE supply_commit_transitions ( + transition_id INTEGER PRIMARY KEY, + + -- Reference back to the state machine this transition belongs to. + state_machine_group_key BLOB NOT NULL REFERENCES supply_commit_state_machines(group_key), + + -- The commitment being replaced by this transition. + -- Can be NULL if this is the first commitment. + old_commitment_id BIGINT REFERENCES supply_commitments(commit_id), + + -- The new commitment that this transition aims to create. + -- Can be NULL initially, before the commitment details are created. + new_commitment_id BIGINT REFERENCES supply_commitments(commit_id), + + -- The chain transaction that, once confirmed, will finalize this transition. + -- Can be NULL until the transaction is created and signed. + pending_commit_txn_id BIGINT REFERENCES chain_txns(txn_id), + + -- Indicates if this transition is frozen and should not accept new updates. + frozen BOOLEAN NOT NULL DEFAULT FALSE, + + -- Indicates if this transition has been successfully completed and committed. + finalized BOOLEAN NOT NULL DEFAULT FALSE, + + -- Timestamp when this transition was initiated (unix timestamp in seconds). 
+ creation_time BIGINT NOT NULL +); + +CREATE UNIQUE INDEX supply_commit_transitions_single_pending_idx + ON supply_commit_transitions (state_machine_group_key) WHERE finalized = FALSE; +CREATE INDEX supply_commit_transitions_state_machine_group_key_idx + ON supply_commit_transitions(state_machine_group_key); + +-- Recreate supply_update_events with original 33-byte group_key. +CREATE TABLE supply_update_events ( + event_id INTEGER PRIMARY KEY, + + -- The group key of the asset group this event belongs to. + -- This is needed to query for dangling events for a specific group. + group_key BLOB NOT NULL CHECK(length(group_key) = 33), + + -- Reference to the state transition this event is part of. + -- Can be NULL if the event is staged while another transition is active. + transition_id BIGINT REFERENCES supply_commit_transitions(transition_id) ON DELETE CASCADE, + + -- The type of update (mint, burn, ignore). + update_type_id INTEGER NOT NULL REFERENCES supply_commit_update_types(id), + + -- Opaque blob containing the serialized data for the specific + -- SupplyUpdateEvent (NewMintEvent, NewBurnEvent, NewIgnoreEvent). + event_data BLOB NOT NULL +); + +-- Recreate the index. +CREATE INDEX supply_update_events_transition_id_idx ON supply_update_events(transition_id); + +-- Recreate mint_supply_pre_commits with original group_key definition. +CREATE TABLE mint_supply_pre_commits ( + id INTEGER PRIMARY KEY, + + -- The ID of the minting batch this universe commitment relates to. + batch_id INTEGER NOT NULL REFERENCES asset_minting_batches(batch_id), + + -- The index of the mint batch anchor transaction pre-commitment output. + tx_output_index INTEGER NOT NULL, + + -- The Taproot output internal key for the pre-commitment output. + taproot_internal_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + + -- The commitment that spent this pre-commitment output, if any. + spent_by BIGINT REFERENCES supply_commitments(commit_id), + + -- The outpoint of the pre-commitment output (txid || vout). + outpoint BLOB, + + -- The asset group key for this pre-commitment. + -- Restored to original definition without length check. + group_key BLOB +); + +-- Recreate the indexes. +CREATE INDEX mint_anchor_uni_commitments_outpoint_idx + ON mint_supply_pre_commits(outpoint) + WHERE outpoint IS NOT NULL; +CREATE UNIQUE INDEX mint_anchor_uni_commitments_unique + ON mint_supply_pre_commits (batch_id, tx_output_index); + +-- Recreate supply_pre_commits with 33-byte group_key. +CREATE TABLE supply_pre_commits ( + id INTEGER PRIMARY KEY, + + -- The asset group key for this supply pre-commitment. + -- Restored to 33-byte length check. + group_key BLOB NOT NULL CHECK(length(group_key) = 33), + + -- The taproot internal key of the pre-commitment transaction output. + taproot_internal_key BLOB NOT NULL CHECK(length(taproot_internal_key) = 33), + + -- The pre-commit outpoint from the mint anchor transaction. + outpoint BLOB NOT NULL CHECK(length(outpoint) > 0), + + -- The chain transaction that included this pre-commitment output. + chain_txn_db_id BIGINT NOT NULL REFERENCES chain_txns(txn_id), + + -- Reference to supply commitment which spends this pre-commitment. 
+    spent_by BIGINT REFERENCES supply_commitments(commit_id)
+);
+
+CREATE INDEX supply_pre_commits_idx_group_key ON supply_pre_commits(group_key);
+CREATE UNIQUE INDEX supply_pre_commits_unique_outpoint ON supply_pre_commits(outpoint);
diff --git a/tapdb/sqlc/migrations/000047_supply_tables_schnorr_serialize_group_key.up.sql b/tapdb/sqlc/migrations/000047_supply_tables_schnorr_serialize_group_key.up.sql
new file mode 100644
index 000000000..76786db4c
--- /dev/null
+++ b/tapdb/sqlc/migrations/000047_supply_tables_schnorr_serialize_group_key.up.sql
@@ -0,0 +1,300 @@
+-------------------------------------------------------------------------------
+-- Drop all relevant dependants and tables first (indexes, then tables).
+-------------------------------------------------------------------------------
+
+-- universe_supply_leaves.
+DROP INDEX IF EXISTS universe_supply_leaves_supply_root_id_type_idx;
+DROP TABLE IF EXISTS universe_supply_leaves;
+
+-- universe_supply_roots.
+DROP INDEX IF EXISTS universe_supply_roots_group_key_idx;
+DROP TABLE IF EXISTS universe_supply_roots;
+
+-- supply_update_events.
+DROP INDEX IF EXISTS supply_update_events_transition_id_idx;
+DROP TABLE IF EXISTS supply_update_events;
+
+-- supply_syncer_push_log.
+DROP INDEX IF EXISTS supply_syncer_push_log_group_key_idx;
+DROP INDEX IF EXISTS supply_syncer_push_log_server_address_idx;
+DROP TABLE IF EXISTS supply_syncer_push_log;
+
+-- supply_commit_transitions.
+DROP INDEX IF EXISTS supply_commit_transitions_single_pending_idx;
+DROP INDEX IF EXISTS supply_commit_transitions_state_machine_group_key_idx;
+DROP TABLE IF EXISTS supply_commit_transitions;
+
+-- supply_commit_state_machines.
+DROP TABLE IF EXISTS supply_commit_state_machines;
+
+-- mint_supply_pre_commits (depends on supply_commitments).
+DROP INDEX IF EXISTS mint_anchor_uni_commitments_outpoint_idx;
+DROP INDEX IF EXISTS mint_anchor_uni_commitments_unique;
+DROP TABLE IF EXISTS mint_supply_pre_commits;
+
+-- supply_pre_commits (depends on supply_commitments).
+DROP INDEX IF EXISTS supply_pre_commits_idx_group_key;
+DROP INDEX IF EXISTS supply_pre_commits_unique_outpoint;
+DROP TABLE IF EXISTS supply_pre_commits;
+
+-- supply_commitments.
+DROP INDEX IF EXISTS supply_commitments_chain_txn_id_idx;
+DROP INDEX IF EXISTS supply_commitments_group_key_idx;
+DROP INDEX IF EXISTS supply_commitments_outpoint_uk;
+DROP INDEX IF EXISTS supply_commitments_spent_commitment_idx;
+DROP TABLE IF EXISTS supply_commitments;
+
+-------------------------------------------------------------------------------
+-- Recreate tables and indexes with 32-byte group_key where applicable.
+-------------------------------------------------------------------------------
+
+-- Recreate universe_supply_roots with updated group_key field (32 bytes instead of 33).
+CREATE TABLE universe_supply_roots (
+    id INTEGER PRIMARY KEY,
+
+    -- The namespace root of the MS-SMT representing this supply tree.
+    -- We set the foreign key constraint evaluation to be deferred until after
+    -- the database transaction ends. Otherwise, if the root of the SMT is
+    -- deleted temporarily before inserting a new root, then this constraint
+    -- is violated.
+    namespace_root VARCHAR UNIQUE NOT NULL REFERENCES mssmt_roots(namespace) DEFERRABLE INITIALLY DEFERRED,
+
+    -- The asset group key identifying the asset group this supply tree
+    -- belongs to. Stored in canonical 32-byte x-only form as defined in
+    -- BIP340 (schnorr.SerializePubKey).
+    group_key BLOB UNIQUE NOT NULL CHECK(length(group_key) = 32)
+);
+
+-- Recreate the index.
+CREATE INDEX universe_supply_roots_group_key_idx ON universe_supply_roots(group_key); + +-- Recreate dependant table and FK. +CREATE TABLE universe_supply_leaves ( + id INTEGER PRIMARY KEY, + + -- Reference to the root supply tree this leaf belongs to. + supply_root_id BIGINT NOT NULL REFERENCES universe_supply_roots(id) ON DELETE CASCADE, + + -- The type of sub-tree this leaf represents (mint_supply, burn, ignore). + sub_tree_type TEXT NOT NULL REFERENCES proof_types(proof_type), + + -- The key used for this leaf within the root supply tree's MS-SMT. + -- Typically a hash identifying the subtree type. + leaf_node_key BLOB NOT NULL, + + -- The namespace within mssmt_nodes where the actual subtree root node resides. + leaf_node_namespace VARCHAR NOT NULL +); + +CREATE UNIQUE INDEX universe_supply_leaves_supply_root_id_type_idx + ON universe_supply_leaves (supply_root_id, sub_tree_type); + +-- Recreate supply_syncer_push_log with updated group_key field (32 bytes instead of 33). +CREATE TABLE supply_syncer_push_log ( + id INTEGER PRIMARY KEY, + + -- The asset group key identifying the asset group this push log belongs to. + -- Stored in canonical 32-byte x-only form as defined in BIP340 + -- (schnorr.SerializePubKey). + group_key BLOB NOT NULL CHECK(length(group_key) = 32), + + -- The highest block height among all supply leaves in this push. + max_pushed_block_height INTEGER NOT NULL, + + -- The server address (host:port) where the commitment was pushed. + server_address TEXT NOT NULL, + + -- The transaction ID (hash) of the supply commitment. + commit_txid BLOB NOT NULL CHECK(length(commit_txid) = 32), + + -- The supply commitment output index within the commitment transaction. + output_index INTEGER NOT NULL, + + -- The number of leaves included in this specific push (diff count between + -- last commitment and current commitment). + num_leaves_pushed INTEGER NOT NULL, + + -- The timestamp when this push log entry was created (unix timestamp in seconds). + created_at BIGINT NOT NULL +); + +-- Recreate the indexes. +CREATE INDEX supply_syncer_push_log_group_key_idx ON supply_syncer_push_log(group_key); +CREATE INDEX supply_syncer_push_log_server_address_idx ON supply_syncer_push_log(server_address); + +-- Recreate supply_commitments with updated group_key field (32 bytes instead of 33). +CREATE TABLE supply_commitments ( + commit_id INTEGER PRIMARY KEY, + + -- The asset group key identifying the asset group this commitment belongs to. + -- Stored in canonical 32-byte x-only form as defined in BIP340 + -- (schnorr.SerializePubKey). + group_key BLOB NOT NULL CHECK(length(group_key) = 32), + + -- The chain transaction that included this commitment. + chain_txn_id BIGINT NOT NULL REFERENCES chain_txns(txn_id), + + -- The output index within the chain_txn_id transaction for the commitment. + output_index INTEGER, + + -- The internal key used for the commitment output. + internal_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + + -- The taproot output key used for the commitment output. + output_key BLOB NOT NULL CHECK(length(output_key) = 33), + + -- The block header of the block mining the commitment transaction. + block_header BLOB, + + -- The block height at which the commitment transaction was confirmed. + -- Can be NULL if the transaction is not yet confirmed. + block_height INTEGER, + + -- The merkle proof demonstrating the commitment's inclusion in the block. + merkle_proof BLOB, + + -- The root hash of the supply commitment at this snapshot. 
+ supply_root_hash BLOB, + + -- The root sum of the supply commitment at this snapshot. + supply_root_sum BIGINT, + + spent_commitment BIGINT REFERENCES supply_commitments(commit_id) +); + +-- Recreate the indexes. +CREATE INDEX supply_commitments_chain_txn_id_idx ON supply_commitments(chain_txn_id); +CREATE INDEX supply_commitments_group_key_idx ON supply_commitments(group_key); +CREATE UNIQUE INDEX supply_commitments_outpoint_uk ON supply_commitments(chain_txn_id, output_index); +CREATE INDEX supply_commitments_spent_commitment_idx ON supply_commitments(spent_commitment); + +-- Recreate supply_commit_state_machines with updated group_key field (32 bytes instead of 33). +CREATE TABLE supply_commit_state_machines ( + -- The asset group key identifying the asset group's state machine. + -- Stored in canonical 32-byte x-only form as defined in BIP340 + -- (schnorr.SerializePubKey). + group_key BLOB PRIMARY KEY CHECK(length(group_key) = 32), + + -- The current state of the state machine. + current_state_id INTEGER NOT NULL REFERENCES supply_commit_states(id), + + -- The latest successfully committed supply state on chain. + -- Can be NULL if no commitment has been made yet. + latest_commitment_id BIGINT REFERENCES supply_commitments(commit_id) +); + +-- Recreate supply_commit_transitions (references both commitments and state machines). +CREATE TABLE supply_commit_transitions ( + transition_id INTEGER PRIMARY KEY, + + -- Reference back to the state machine this transition belongs to. + state_machine_group_key BLOB NOT NULL REFERENCES supply_commit_state_machines(group_key), + + -- The commitment being replaced by this transition. + -- Can be NULL if this is the first commitment. + old_commitment_id BIGINT REFERENCES supply_commitments(commit_id), + + -- The new commitment that this transition aims to create. + -- Can be NULL initially, before the commitment details are created. + new_commitment_id BIGINT REFERENCES supply_commitments(commit_id), + + -- The chain transaction that, once confirmed, will finalize this transition. + -- Can be NULL until the transaction is created and signed. + pending_commit_txn_id BIGINT REFERENCES chain_txns(txn_id), + + -- Indicates if this transition is frozen and should not accept new updates. + frozen BOOLEAN NOT NULL DEFAULT FALSE, + + -- Indicates if this transition has been successfully completed and committed. + finalized BOOLEAN NOT NULL DEFAULT FALSE, + + -- Timestamp when this transition was initiated (unix timestamp in seconds). + creation_time BIGINT NOT NULL +); + +CREATE UNIQUE INDEX supply_commit_transitions_single_pending_idx + ON supply_commit_transitions (state_machine_group_key) WHERE finalized = FALSE; +CREATE INDEX supply_commit_transitions_state_machine_group_key_idx + ON supply_commit_transitions(state_machine_group_key); + +-- Recreate supply_update_events with updated group_key field (32 bytes instead of 33). +CREATE TABLE supply_update_events ( + event_id INTEGER PRIMARY KEY, + + -- The group key of the asset group this event belongs to. + -- This is needed to query for dangling events for a specific group. + -- Stored in canonical 32-byte x-only form as defined in BIP340 + -- (schnorr.SerializePubKey). + group_key BLOB NOT NULL CHECK(length(group_key) = 32), + + -- Reference to the state transition this event is part of. + -- Can be NULL if the event is staged while another transition is active. + transition_id BIGINT REFERENCES supply_commit_transitions(transition_id) ON DELETE CASCADE, + + -- The type of update (mint, burn, ignore). 
+ update_type_id INTEGER NOT NULL REFERENCES supply_commit_update_types(id), + + -- Opaque blob containing the serialized data for the specific + -- SupplyUpdateEvent (NewMintEvent, NewBurnEvent, NewIgnoreEvent). + event_data BLOB NOT NULL +); + +-- Recreate the index. +CREATE INDEX supply_update_events_transition_id_idx ON supply_update_events(transition_id); + +-- Recreate mint_supply_pre_commits with updated group_key field (32 bytes instead of 33). +CREATE TABLE mint_supply_pre_commits ( + id INTEGER PRIMARY KEY, + + -- The ID of the minting batch this universe commitment relates to. + batch_id INTEGER NOT NULL REFERENCES asset_minting_batches(batch_id), + + -- The index of the mint batch anchor transaction pre-commitment output. + tx_output_index INTEGER NOT NULL, + + -- The asset group key for this pre-commitment. + -- Stored in canonical 32-byte x-only form as defined in BIP340 + -- (schnorr.SerializePubKey). + group_key BLOB CHECK(length(group_key) = 32), + + -- The Taproot output internal key for the pre-commitment output. + taproot_internal_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + + -- The commitment that spent this pre-commitment output, if any. + spent_by BIGINT REFERENCES supply_commitments(commit_id), + + -- The outpoint of the pre-commitment output (txid || vout). + outpoint BLOB +); + +-- Recreate the indexes. +CREATE INDEX mint_anchor_uni_commitments_outpoint_idx + ON mint_supply_pre_commits(outpoint) + WHERE outpoint IS NOT NULL; +CREATE UNIQUE INDEX mint_anchor_uni_commitments_unique + ON mint_supply_pre_commits (batch_id, tx_output_index); + +-- Recreate supply_pre_commits with updated group_key field (32 bytes). +CREATE TABLE supply_pre_commits ( + id INTEGER PRIMARY KEY, + + -- The asset group key for this supply pre-commitment. + -- Stored in canonical 32-byte x-only form as defined in BIP340 + -- (schnorr.SerializePubKey). + group_key BLOB NOT NULL CHECK(length(group_key) = 32), + + -- The taproot internal key of the pre-commitment transaction output. + taproot_internal_key BLOB NOT NULL CHECK(length(taproot_internal_key) = 33), + + -- The pre-commit outpoint from the mint anchor transaction. + outpoint BLOB NOT NULL CHECK(length(outpoint) > 0), + + -- The chain transaction that included this pre-commitment output. + chain_txn_db_id BIGINT NOT NULL REFERENCES chain_txns(txn_id), + + -- Reference to supply commitment which spends this pre-commitment. + spent_by BIGINT REFERENCES supply_commitments(commit_id) +); + +CREATE INDEX supply_pre_commits_idx_group_key ON supply_pre_commits(group_key); +CREATE UNIQUE INDEX supply_pre_commits_unique_outpoint ON supply_pre_commits(outpoint); diff --git a/tapdb/sqlc/schemas/generated_schema.sql b/tapdb/sqlc/schemas/generated_schema.sql index acb519e8b..ff3712df4 100644 --- a/tapdb/sqlc/schemas/generated_schema.sql +++ b/tapdb/sqlc/schemas/generated_schema.sql @@ -624,14 +624,14 @@ CREATE TABLE managed_utxos ( lease_expiry TIMESTAMP , root_version SMALLINT); -CREATE INDEX mint_anchor_uni_commitments_outpoint_idx - ON "mint_supply_pre_commits"(outpoint) +CREATE INDEX mint_anchor_uni_commitments_outpoint_idx + ON mint_supply_pre_commits(outpoint) WHERE outpoint IS NOT NULL; CREATE UNIQUE INDEX mint_anchor_uni_commitments_unique - ON "mint_supply_pre_commits" (batch_id, tx_output_index); + ON mint_supply_pre_commits (batch_id, tx_output_index); -CREATE TABLE "mint_supply_pre_commits" ( +CREATE TABLE mint_supply_pre_commits ( id INTEGER PRIMARY KEY, -- The ID of the minting batch this universe commitment relates to. 
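The 33-to-32 byte tightening in these schema hunks comes from switching
serializers: SerializeCompressed prepends a 0x02/0x03 parity byte to the
x coordinate, while the BIP340 x-only form keeps only the 32 coordinate
bytes. A minimal illustrative sketch of the difference, using a freshly
generated key (not part of this change set):

    package main

    import (
    	"fmt"

    	"github.com/btcsuite/btcd/btcec/v2"
    	"github.com/btcsuite/btcd/btcec/v2/schnorr"
    )

    func main() {
    	priv, err := btcec.NewPrivateKey()
    	if err != nil {
    		panic(err)
    	}
    	pub := priv.PubKey()

    	// Legacy column format: 33 bytes, parity prefix plus x coordinate.
    	fmt.Println(len(pub.SerializeCompressed())) // 33

    	// New column format: 32-byte BIP340 x-only encoding, matching the
    	// CHECK(length(group_key) = 32) constraints in these tables.
    	fmt.Println(len(schnorr.SerializePubKey(pub))) // 32
    }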
@@ -640,11 +640,20 @@ CREATE TABLE "mint_supply_pre_commits" ( -- The index of the mint batch anchor transaction pre-commitment output. tx_output_index INTEGER NOT NULL, + -- The asset group key for this pre-commitment. + -- Stored in canonical 32-byte x-only form as defined in BIP340 + -- (schnorr.SerializePubKey). + group_key BLOB CHECK(length(group_key) = 32), + -- The Taproot output internal key for the pre-commitment output. - group_key BLOB -, taproot_internal_key_id -BIGINT REFERENCES internal_keys(key_id) -NOT NULL, spent_by BIGINT REFERENCES supply_commitments(commit_id), outpoint BLOB); + taproot_internal_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + + -- The commitment that spent this pre-commitment output, if any. + spent_by BIGINT REFERENCES supply_commitments(commit_id), + + -- The outpoint of the pre-commitment output (txid || vout). + outpoint BLOB +); CREATE TABLE mssmt_nodes ( -- hash_key is the hash key by which we reference all nodes. @@ -792,8 +801,10 @@ CREATE TABLE script_keys ( CREATE INDEX status_idx ON addr_events(status); CREATE TABLE supply_commit_state_machines ( - -- The tweaked group key identifying the asset group's state machine. - group_key BLOB PRIMARY KEY CHECK(length(group_key) = 33), + -- The asset group key identifying the asset group's state machine. + -- Stored in canonical 32-byte x-only form as defined in BIP340 + -- (schnorr.SerializePubKey). + group_key BLOB PRIMARY KEY CHECK(length(group_key) = 32), -- The current state of the state machine. current_state_id INTEGER NOT NULL REFERENCES supply_commit_states(id), @@ -839,7 +850,8 @@ CREATE TABLE supply_commit_transitions ( CREATE UNIQUE INDEX supply_commit_transitions_single_pending_idx ON supply_commit_transitions (state_machine_group_key) WHERE finalized = FALSE; -CREATE INDEX supply_commit_transitions_state_machine_group_key_idx ON supply_commit_transitions(state_machine_group_key); +CREATE INDEX supply_commit_transitions_state_machine_group_key_idx + ON supply_commit_transitions(state_machine_group_key); CREATE TABLE supply_commit_update_types ( id INTEGER PRIMARY KEY, @@ -849,8 +861,10 @@ CREATE TABLE supply_commit_update_types ( CREATE TABLE supply_commitments ( commit_id INTEGER PRIMARY KEY, - -- The tweaked group key identifying the asset group this commitment belongs to. - group_key BLOB NOT NULL CHECK(length(group_key) = 33), + -- The asset group key identifying the asset group this commitment belongs to. + -- Stored in canonical 32-byte x-only form as defined in BIP340 + -- (schnorr.SerializePubKey). + group_key BLOB NOT NULL CHECK(length(group_key) = 32), -- The chain transaction that included this commitment. chain_txn_id BIGINT NOT NULL REFERENCES chain_txns(txn_id), @@ -878,19 +892,18 @@ CREATE TABLE supply_commitments ( supply_root_hash BLOB, -- The root sum of the supply commitment at this snapshot. 
- supply_root_sum BIGINT -, spent_commitment BIGINT - REFERENCES supply_commitments(commit_id)); + supply_root_sum BIGINT, + + spent_commitment BIGINT REFERENCES supply_commitments(commit_id) +); CREATE INDEX supply_commitments_chain_txn_id_idx ON supply_commitments(chain_txn_id); CREATE INDEX supply_commitments_group_key_idx ON supply_commitments(group_key); -CREATE UNIQUE INDEX supply_commitments_outpoint_uk - ON supply_commitments(chain_txn_id, output_index); +CREATE UNIQUE INDEX supply_commitments_outpoint_uk ON supply_commitments(chain_txn_id, output_index); -CREATE INDEX supply_commitments_spent_commitment_idx - ON supply_commitments(spent_commitment); +CREATE INDEX supply_commitments_spent_commitment_idx ON supply_commitments(spent_commitment); CREATE TABLE supply_pre_commits ( id INTEGER PRIMARY KEY, @@ -913,18 +926,17 @@ CREATE TABLE supply_pre_commits ( spent_by BIGINT REFERENCES supply_commitments(commit_id) ); -CREATE INDEX supply_pre_commits_idx_group_key - ON supply_pre_commits(group_key); +CREATE INDEX supply_pre_commits_idx_group_key ON supply_pre_commits(group_key); -CREATE UNIQUE INDEX supply_pre_commits_unique_outpoint - ON supply_pre_commits(outpoint); +CREATE UNIQUE INDEX supply_pre_commits_unique_outpoint ON supply_pre_commits(outpoint); CREATE TABLE supply_syncer_push_log ( id INTEGER PRIMARY KEY, - -- The tweaked group key identifying the asset group this push log belongs - -- to. This should match the group_key format used in universe_supply_roots. - group_key BLOB NOT NULL CHECK(length(group_key) = 33), + -- The asset group key identifying the asset group this push log belongs to. + -- Stored in canonical 32-byte x-only form as defined in BIP340 + -- (schnorr.SerializePubKey). + group_key BLOB NOT NULL CHECK(length(group_key) = 32), -- The highest block height among all supply leaves in this push. max_pushed_block_height INTEGER NOT NULL, @@ -946,18 +958,18 @@ CREATE TABLE supply_syncer_push_log ( created_at BIGINT NOT NULL ); -CREATE INDEX supply_syncer_push_log_group_key_idx - ON supply_syncer_push_log(group_key); +CREATE INDEX supply_syncer_push_log_group_key_idx ON supply_syncer_push_log(group_key); -CREATE INDEX supply_syncer_push_log_server_address_idx - ON supply_syncer_push_log(server_address); +CREATE INDEX supply_syncer_push_log_server_address_idx ON supply_syncer_push_log(server_address); CREATE TABLE supply_update_events ( event_id INTEGER PRIMARY KEY, -- The group key of the asset group this event belongs to. -- This is needed to query for dangling events for a specific group. - group_key BLOB NOT NULL CHECK(length(group_key) = 33), + -- Stored in canonical 32-byte x-only form as defined in BIP340 + -- (schnorr.SerializePubKey). + group_key BLOB NOT NULL CHECK(length(group_key) = 32), -- Reference to the state transition this event is part of. -- Can be NULL if the event is staged while another transition is active. @@ -1158,14 +1170,15 @@ CREATE TABLE universe_supply_leaves ( sub_tree_type TEXT NOT NULL REFERENCES proof_types(proof_type), -- The key used for this leaf within the root supply tree's MS-SMT. - -- This typically corresponds to a hash identifying the sub-tree type. + -- Typically a hash identifying the subtree type. leaf_node_key BLOB NOT NULL, - -- The namespace within mssmt_nodes where the actual sub-tree root node resides. + -- The namespace within mssmt_nodes where the actual subtree root node resides. 
 leaf_node_namespace VARCHAR NOT NULL
 );
 
-CREATE UNIQUE INDEX universe_supply_leaves_supply_root_id_type_idx ON universe_supply_leaves(supply_root_id, sub_tree_type);
+CREATE UNIQUE INDEX universe_supply_leaves_supply_root_id_type_idx
+    ON universe_supply_leaves (supply_root_id, sub_tree_type);
 
 CREATE TABLE universe_supply_roots (
     id INTEGER PRIMARY KEY,
@@ -1177,8 +1190,10 @@ CREATE TABLE universe_supply_roots (
     -- is violated.
     namespace_root VARCHAR UNIQUE NOT NULL REFERENCES mssmt_roots(namespace) DEFERRABLE INITIALLY DEFERRED,
 
-    -- The tweaked group key identifying the asset group this supply tree belongs to.
-    group_key BLOB UNIQUE NOT NULL CHECK(length(group_key) = 33)
+    -- The asset group key identifying the asset group this supply tree
+    -- belongs to. Stored in canonical 32-byte x-only form as defined in
+    -- BIP340 (schnorr.SerializePubKey).
+    group_key BLOB UNIQUE NOT NULL CHECK(length(group_key) = 32)
 );
 
 CREATE INDEX universe_supply_roots_group_key_idx ON universe_supply_roots(group_key);

From f1bfb7ac735040460845ac8ef5c133f10c65aa27 Mon Sep 17 00:00:00 2001
From: ffranr
Date: Thu, 11 Sep 2025 01:36:22 +0100
Subject: [PATCH 02/23] tapdb: use schnorr.ParsePubKey/SerializePubKey for
 group key columns

Earlier commits changed group_key columns across several SQL tables to
store 32-byte BIP340 public keys instead of 33-byte compressed (tweaked)
keys.

This commit replaces btcec.ParsePubKey with schnorr.ParsePubKey and
pubKey.SerializeCompressed() with schnorr.SerializePubKey when reading
from or writing to those columns. Aligns DB encoding with BIP340 and
avoids size/format mismatches.
---
 tapdb/asset_minting.go      | 13 +++++++------
 tapdb/asset_minting_test.go |  5 +++--
 tapdb/supply_commit.go      | 28 ++++++++++++++--------------
 tapdb/supply_commit_test.go |  3 ++-
 tapdb/supply_syncer.go      |  3 ++-
 tapdb/supply_tree.go        |  7 ++++---
 6 files changed, 32 insertions(+), 27 deletions(-)

diff --git a/tapdb/asset_minting.go b/tapdb/asset_minting.go
index 53e3e83fb..1ea162349 100644
--- a/tapdb/asset_minting.go
+++ b/tapdb/asset_minting.go
@@ -9,6 +9,7 @@ import (
 	"fmt"
 
 	"github.com/btcsuite/btcd/btcec/v2"
+	"github.com/btcsuite/btcd/btcec/v2/schnorr"
 	"github.com/btcsuite/btcd/btcutil/psbt"
 	"github.com/btcsuite/btcd/chaincfg/chainhash"
 	"github.com/btcsuite/btcd/wire"
@@ -433,9 +434,9 @@ func insertMintAnchorTx(ctx context.Context, q PendingAssetStore,
 	// Serialize the group key if it is defined. The key may be unset when
 	// there is no existing group and the minting batch is funded but not
 	// yet sealed.
-	groupPubKey := fn.MapOptionZ(
+	groupPubKeyBytes := fn.MapOptionZ(
 		preCommitOut.GroupPubKey, func(pubKey btcec.PublicKey) []byte {
-			return pubKey.SerializeCompressed()
+			return schnorr.SerializePubKey(&pubKey)
 		},
 	)
 
@@ -453,7 +454,7 @@ func insertMintAnchorTx(ctx context.Context, q PendingAssetStore,
 			BatchKey:             rawBatchKey,
 			TxOutputIndex:        int32(preCommitOut.OutIdx),
 			TaprootInternalKeyID: internalKeyID,
-			GroupKey:             groupPubKey,
+			GroupKey:             groupPubKeyBytes,
 			Outpoint:             outPointBytes,
 		},
 	)
@@ -1375,7 +1376,7 @@ func marshalMintingBatch(ctx context.Context, q PendingAssetStore,
 
 	// Parse the group public key from the database.
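 	// As of migration 47 the column stores the 32-byte x-only form, so
 	// the key must be parsed with schnorr.ParsePubKey rather than
 	// btcec.ParsePubKey.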
var groupPubKey fn.Option[btcec.PublicKey] if res.GroupKey != nil { - gk, err := btcec.ParsePubKey(res.GroupKey) + gk, err := schnorr.ParsePubKey(res.GroupKey) if err != nil { return nil, fmt.Errorf("error parsing "+ "group public key: %w", err) @@ -1539,7 +1540,7 @@ func (a *AssetMintingStore) FetchDelegationKey(ctx context.Context, groupKey btcec.PublicKey) (fn.Option[tapgarden.DelegationKey], error) { var zero fn.Option[tapgarden.DelegationKey] - groupKeyBytes := groupKey.SerializeCompressed() + groupKeyBytes := schnorr.SerializePubKey(&groupKey) var delegationKey fn.Option[tapgarden.DelegationKey] @@ -1631,7 +1632,7 @@ func upsertPreCommit(ctx context.Context, q PendingAssetStore, groupPubKeyBytes := fn.MapOptionZ( preCommit.GroupPubKey, func(groupKey btcec.PublicKey) []byte { - return groupKey.SerializeCompressed() + return schnorr.SerializePubKey(&groupKey) }, ) diff --git a/tapdb/asset_minting_test.go b/tapdb/asset_minting_test.go index a61f884e4..4d6e3acbe 100644 --- a/tapdb/asset_minting_test.go +++ b/tapdb/asset_minting_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/btcsuite/btcd/btcutil/psbt" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" @@ -1975,7 +1976,7 @@ func TestUpsertMintSupplyPreCommit(t *testing.T) { // Serialize keys into bytes for easier handling. preCommitInternalKey, _ := test.RandKeyDesc(t) - groupPubKeyBytes := group.GroupPubKey.SerializeCompressed() + groupPubKeyBytes := schnorr.SerializePubKey(&group.GroupPubKey) // Upsert a mint anchor commitment for the batch. storeMintSupplyPreCommit( @@ -2006,7 +2007,7 @@ func TestUpsertMintSupplyPreCommit(t *testing.T) { // Upsert-ing a new group key for the same pre-commit outpoint should // overwrite the existing one. 
groupPubKey2 := test.RandPubKey(t) - groupPubKey2Bytes := groupPubKey2.SerializeCompressed() + groupPubKey2Bytes := schnorr.SerializePubKey(groupPubKey2) storeMintSupplyPreCommit( t, *assetStore, batchKey, txOutputIndex, internalKey2, diff --git a/tapdb/supply_commit.go b/tapdb/supply_commit.go index 3603f5b62..9fd8452be 100644 --- a/tapdb/supply_commit.go +++ b/tapdb/supply_commit.go @@ -288,7 +288,7 @@ func (s *SupplyCommitMachine) UnspentPrecommits(ctx context.Context, if groupKey == nil { return lfn.Err[supplycommit.PreCommits](ErrMissingGroupKey) } - groupKeyBytes := groupKey.SerializeCompressed() + groupKeyBytes := schnorr.SerializePubKey(groupKey) var preCommits supplycommit.PreCommits readTx := ReadTxOption() @@ -318,7 +318,7 @@ func (s *SupplyCommitMachine) UnspentPrecommits(ctx context.Context, "pre-commitment internal key: %w", err) } - groupPubKey, err := btcec.ParsePubKey(row.GroupKey) + groupPubKey, err := schnorr.ParsePubKey(row.GroupKey) if err != nil { return fmt.Errorf("error parsing group key: %w", err) @@ -433,7 +433,7 @@ func (s *SupplyCommitMachine) SupplyCommit(ctx context.Context, ErrMissingGroupKey, ) } - groupKeyBytes := groupKey.SerializeCompressed() + groupKeyBytes := schnorr.SerializePubKey(groupKey) var rootCommitmentOpt lfn.Option[supplycommit.RootCommitment] @@ -613,7 +613,7 @@ func (s *SupplyCommitMachine) InsertPendingUpdate(ctx context.Context, if groupKey == nil { return ErrMissingGroupKey } - groupKeyBytes := groupKey.SerializeCompressed() + groupKeyBytes := schnorr.SerializePubKey(groupKey) writeTx := WriteTxOption() return s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error { @@ -766,7 +766,7 @@ func (s *SupplyCommitMachine) FreezePendingTransition(ctx context.Context, if groupKey == nil { return ErrMissingGroupKey } - groupKeyBytes := groupKey.SerializeCompressed() + groupKeyBytes := schnorr.SerializePubKey(groupKey) writeTx := WriteTxOption() return s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error { @@ -787,7 +787,7 @@ func (s *SupplyCommitMachine) BindDanglingUpdatesToTransition( if groupKey == nil { return nil, ErrMissingGroupKey } - groupKeyBytes := groupKey.SerializeCompressed() + groupKeyBytes := schnorr.SerializePubKey(groupKey) var ( boundEvents []supplycommit.SupplyUpdateEvent @@ -881,7 +881,7 @@ func (s *SupplyCommitMachine) InsertSignedCommitTx(ctx context.Context, if groupKey == nil { return ErrMissingGroupKey } - groupKeyBytes := groupKey.SerializeCompressed() + groupKeyBytes := schnorr.SerializePubKey(groupKey) commitTx := commitDetails.Txn internalKeyDesc := commitDetails.InternalKey @@ -1008,7 +1008,7 @@ func (s *SupplyCommitMachine) InsertSupplyCommit(ctx context.Context, if groupKey == nil { return ErrMissingGroupKey } - groupKeyBytes := groupKey.SerializeCompressed() + groupKeyBytes := schnorr.SerializePubKey(groupKey) commitTx := commit.Txn internalKey := commit.InternalKey @@ -1200,7 +1200,7 @@ func (s *SupplyCommitMachine) CommitState(ctx context.Context, if groupKey == nil { return ErrMissingGroupKey } - groupKeyBytes := groupKey.SerializeCompressed() + groupKeyBytes := schnorr.SerializePubKey(groupKey) newStateName, err := stateToDBString(state) if err != nil { @@ -1272,7 +1272,7 @@ func (s *SupplyCommitMachine) FetchCommitmentByOutpoint(ctx context.Context, var ( writeTx = WriteTxOption() - groupKeyBytes = groupKey.SerializeCompressed() + groupKeyBytes = schnorr.SerializePubKey(groupKey) commit *supplycommit.RootCommitment ) dbErr := s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error { @@ -1325,7 
+1325,7 @@ func (s *SupplyCommitMachine) FetchCommitmentBySpentOutpoint( var ( writeTx = WriteTxOption() - groupKeyBytes = groupKey.SerializeCompressed() + groupKeyBytes = schnorr.SerializePubKey(groupKey) commit *supplycommit.RootCommitment ) dbErr := s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error { @@ -1376,7 +1376,7 @@ func (s *SupplyCommitMachine) FetchStartingCommitment(ctx context.Context, var ( writeTx = WriteTxOption() - groupKeyBytes = groupKey.SerializeCompressed() + groupKeyBytes = schnorr.SerializePubKey(groupKey) commit *supplycommit.RootCommitment ) dbErr := s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error { @@ -1570,7 +1570,7 @@ func (s *SupplyCommitMachine) FetchState(ctx context.Context, return nil, lfn.None[supplycommit.SupplyStateTransition](), ErrMissingGroupKey } - groupKeyBytes := groupKey.SerializeCompressed() + groupKeyBytes := schnorr.SerializePubKey(groupKey) var ( state supplycommit.State @@ -1721,7 +1721,7 @@ func (s *SupplyCommitMachine) ApplyStateTransition( if groupKey == nil { return ErrMissingGroupKey } - groupKeyBytes := groupKey.SerializeCompressed() + groupKeyBytes := schnorr.SerializePubKey(groupKey) // Ensure we have the new commitment details. newCommitment := transition.NewCommitment diff --git a/tapdb/supply_commit_test.go b/tapdb/supply_commit_test.go index b03c457ac..b8dca9fa1 100644 --- a/tapdb/supply_commit_test.go +++ b/tapdb/supply_commit_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/lightninglabs/taproot-assets/asset" @@ -204,7 +205,7 @@ func newSupplyCommitTestHarness(t *testing.T) *supplyCommitTestHarness { commitMachine: setup.commitMachine, db: setup.db, groupPubKey: setup.groupPubKey, - groupKeyBytes: setup.groupPubKey.SerializeCompressed(), + groupKeyBytes: schnorr.SerializePubKey(setup.groupPubKey), assetSpec: spec, baseGenesis: setup.baseGenesis, groupKey: groupKey, diff --git a/tapdb/supply_syncer.go b/tapdb/supply_syncer.go index ee4c3ecef..32060073d 100644 --- a/tapdb/supply_syncer.go +++ b/tapdb/supply_syncer.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/tapdb/sqlc" "github.com/lightninglabs/taproot-assets/universe" @@ -73,7 +74,7 @@ func (s *SupplySyncerStore) LogSupplyCommitPush(ctx context.Context, "syncer log: %w", err) } - groupKeyBytes := groupKey.SerializeCompressed() + groupKeyBytes := schnorr.SerializePubKey(groupKey) // Extract the outpoint (transaction ID and output index) from the // commitment. diff --git a/tapdb/supply_tree.go b/tapdb/supply_tree.go index 4644af4ef..a50829a79 100644 --- a/tapdb/supply_tree.go +++ b/tapdb/supply_tree.go @@ -9,6 +9,7 @@ import ( "fmt" "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/fn" "github.com/lightninglabs/taproot-assets/mssmt" @@ -53,7 +54,7 @@ func NewSupplyTreeStore(db BatchedUniverseTree) *SupplyTreeStore { // rootSupplyNamespace generates the SMT namespace for the root supply tree // associated with a given group key. 
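 // The group key is hex-encoded from its 32-byte x-only serialization,
 // so the namespace suffix is always 64 hex characters.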
func rootSupplyNamespace(groupKey *btcec.PublicKey) string { - keyHex := hex.EncodeToString(groupKey.SerializeCompressed()) + keyHex := hex.EncodeToString(schnorr.SerializePubKey(groupKey)) return fmt.Sprintf("%s-%s", supplyRootNS, keyHex) } @@ -62,7 +63,7 @@ func rootSupplyNamespace(groupKey *btcec.PublicKey) string { func subTreeNamespace(groupKey *btcec.PublicKey, treeType supplycommit.SupplySubTree) string { - keyHex := hex.EncodeToString(groupKey.SerializeCompressed()) + keyHex := hex.EncodeToString(schnorr.SerializePubKey(groupKey)) return fmt.Sprintf("%s-%s-%s", supplySubTreeNS, treeType.String(), keyHex) } @@ -83,7 +84,7 @@ func upsertSupplyTreeLeaf(ctx context.Context, dbTx BaseUniverseStore, rootID, err := dbTx.UpsertUniverseSupplyRoot(ctx, UpsertUniverseSupplyRoot{ NamespaceRoot: rootNs, - GroupKey: groupKey.SerializeCompressed(), + GroupKey: schnorr.SerializePubKey(groupKey), }, ) if err != nil { From 74aaae2f07931ed52d217cec2520bfeec4547650 Mon Sep 17 00:00:00 2001 From: ffranr Date: Thu, 11 Sep 2025 11:52:06 +0100 Subject: [PATCH 03/23] supplyverifier: compare group pub keys by equivalence not equality Use the new IsEquivalentPubKeys helper to compare group pub keys during validation. This ensures we treat keys as equivalent if their BIP340 Schnorr serialization matches, avoiding issues with multiple encodings of the same point. --- universe/supplyverifier/verifier.go | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/universe/supplyverifier/verifier.go b/universe/supplyverifier/verifier.go index ebd89ce78..883347239 100644 --- a/universe/supplyverifier/verifier.go +++ b/universe/supplyverifier/verifier.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/lightninglabs/lndclient" "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/mssmt" @@ -397,6 +398,19 @@ func (v *Verifier) proofVerifierCtx(ctx context.Context) proof.VerifierCtx { } } +// IsEquivalentPubKeys reports whether two public keys are equivalent +// when compared in their BIP340-serialized form. This avoids issues +// with multiple encodings of the same elliptic curve point, since +// BIP340 serialization provides a unique, canonical byte representation. +// +// TODO(ffranr): This should be a method on btcec.PublicKey. +func IsEquivalentPubKeys(a, b *btcec.PublicKey) bool { + return bytes.Equal( + schnorr.SerializePubKey(a), + schnorr.SerializePubKey(b), + ) +} + // verifyIssuanceLeaf verifies a single issuance leaf entry. func (v *Verifier) verifyIssuanceLeaf(ctx context.Context, assetSpec asset.Specifier, delegationKey btcec.PublicKey, @@ -463,9 +477,11 @@ func (v *Verifier) verifyIssuanceLeaf(ctx context.Context, return fmt.Errorf("missing group key in issuance leaf") } + // Check to ensure that the group key in the issuance leaf matches + // the group key in the issuance proof. 
proofGroupPubKey := issuanceProof.Asset.GroupKey.GroupPubKey leafGroupPubKey := issuanceLeaf.GroupKey.GroupPubKey - if !proofGroupPubKey.IsEqual(&leafGroupPubKey) { + if !IsEquivalentPubKeys(&proofGroupPubKey, &leafGroupPubKey) { return fmt.Errorf("group key in issuance leaf does not match " + "group key in issuance proof") } @@ -492,7 +508,7 @@ func (v *Verifier) verifyIssuanceLeaf(ctx context.Context, } leafGroupKey := issuanceProof.Asset.GroupKey.GroupPubKey - if leafGroupKey != *expectedGroupKey { + if !IsEquivalentPubKeys(&leafGroupKey, expectedGroupKey) { return fmt.Errorf("asset group key in issuance proof " + "does not match expected asset group key") } @@ -558,7 +574,7 @@ func (v *Verifier) verifyIgnoreLeaf(ctx context.Context, "specifier when verifying ignore leaf: %w", err) } - if assetGroup.GroupPubKey != *expectedGroupKey { + if !IsEquivalentPubKeys(&assetGroup.GroupPubKey, expectedGroupKey) { return fmt.Errorf("asset group key for ignore leaf asset " + "does not match expected asset group key") } @@ -617,7 +633,7 @@ func (v *Verifier) verifyBurnLeaf(ctx context.Context, } leafGroupKey := burnProof.Asset.GroupKey.GroupPubKey - if leafGroupKey != *expectedGroupKey { + if !IsEquivalentPubKeys(&leafGroupKey, expectedGroupKey) { return fmt.Errorf("asset group key in burn proof " + "does not match expected asset group key") } From 851993144d183b890486ac8451745b87c630972f Mon Sep 17 00:00:00 2001 From: ffranr Date: Tue, 9 Sep 2025 15:32:29 +0100 Subject: [PATCH 04/23] mssmt: add helper function NewProofFromCompressedBytes This function is helpful when unmarshalling gRPC compressed proof bytes. --- mssmt/proof.go | 33 ++++++ mssmt/proof_test.go | 258 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 291 insertions(+) create mode 100644 mssmt/proof_test.go diff --git a/mssmt/proof.go b/mssmt/proof.go index 031fc4c2c..2c3e02038 100644 --- a/mssmt/proof.go +++ b/mssmt/proof.go @@ -1,6 +1,7 @@ package mssmt import ( + "bytes" "errors" "fmt" @@ -41,6 +42,38 @@ func NewProof(nodes []Node) *Proof { } } +// NewProofFromCompressedBytes initializes a new merkle proof from its +// compressed byte representation. +func NewProofFromCompressedBytes(compressedProofBytes []byte) (Proof, error) { + var zero Proof + + if len(compressedProofBytes) == 0 { + return zero, fmt.Errorf("compressed proof bytes are empty") + } + + var compressedProof CompressedProof + reader := bytes.NewReader(compressedProofBytes) + if err := compressedProof.Decode(reader); err != nil { + return zero, fmt.Errorf("decode compressed proof: %w", err) + } + + // Fail if extra data follows a valid proof encoding. + if remaining := reader.Len(); remaining != 0 { + return zero, fmt.Errorf("trailing data after compressed "+ + "proof: %d bytes", remaining) + } + + p, err := compressedProof.Decompress() + if err != nil { + return zero, fmt.Errorf("decompress proof: %w", err) + } + if p == nil { + return zero, fmt.Errorf("decompressor returned nil proof") + } + + return *p, nil +} + // Root returns the root node obtained by walking up the tree. func (p Proof) Root(key [32]byte, leaf Node) *BranchNode { // Note that we don't need to check the error here since the only point diff --git a/mssmt/proof_test.go b/mssmt/proof_test.go new file mode 100644 index 000000000..430a8fbfc --- /dev/null +++ b/mssmt/proof_test.go @@ -0,0 +1,258 @@ +package mssmt + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" +) + +// compressedProofBytes creates a compressed proof with the specified number of +// non-empty nodes. 
If numNodes is 0, all nodes will be empty. If numNodes is +// MaxTreeLevels, all node positions will be populated. +func compressedProofBytes(t *testing.T, numNodes int) []byte { + t.Helper() + + if numNodes < 0 || numNodes > MaxTreeLevels { + require.Fail(t, "numNodes must be between 0 and MaxTreeLevels") + } + + // Create the specified number of non-empty nodes. + nodes := make([]Node, numNodes) + for i := 0; i < numNodes; i++ { + hash := NodeHash{} + // Make each hash unique. + hash[0] = byte(i + 1) + nodes[i] = NewComputedNode(hash, uint64((i+1)*100)) + } + + // Create bits array: false for non-empty nodes, true for empty nodes. + bits := make([]bool, MaxTreeLevels) + for i := 0; i < MaxTreeLevels; i++ { + // First numNodes are false (non-empty), rest are true (empty). + bits[i] = i >= numNodes + } + + compressedProof := CompressedProof{ + Bits: bits, + Nodes: nodes, + } + + var buf bytes.Buffer + if err := compressedProof.Encode(&buf); err != nil { + panic(err) + } + return buf.Bytes() +} + +// TestNewProofFromCompressedBytes tests the NewProofFromCompressedBytes +// function with various valid and invalid compressed proof byte inputs. +func TestNewProofFromCompressedBytes(t *testing.T) { + t.Parallel() + + testCases := []struct { + // name describes the test case for identification. + name string + + // input is the compressed proof bytes to test with. + input []byte + + // expectError indicates whether an error is expected. + expectError bool + + // errorMsg is the expected error message substring when + // expectError is true. + errorMsg string + + // expectNumNodes is the expected number of populated + // (non-empty) nodes in the decompressed proof. + // Only relevant when expectError is false. + expectNumNodes int + }{ + { + name: "valid compressed proof with one node", + input: compressedProofBytes(t, 1), + expectError: false, + expectNumNodes: 1, + }, + { + name: "valid compressed proof with all empty " + + "nodes", + input: compressedProofBytes(t, 0), + expectError: false, + expectNumNodes: 0, + }, + { + name: "empty bytes", + input: []byte{}, + expectError: true, + errorMsg: "compressed proof bytes are empty", + expectNumNodes: 0, + }, + { + name: "single byte - insufficient data", + input: []byte{0x01}, + expectError: true, + errorMsg: "decode compressed proof", + expectNumNodes: 0, + }, + { + name: "only number of nodes field", + // numNodes = 1, but no node data. + input: []byte{0x00, 0x01}, + expectError: true, + errorMsg: "decode compressed proof", + expectNumNodes: 0, + }, + { + name: "invalid node count - more nodes than expected", + input: func() []byte { + // Create a proof that claims to have 2 nodes + // but bits indicate only 1. + node1 := NewComputedNode(NodeHash{0x01}, 100) + node2 := NewComputedNode(NodeHash{0x02}, 200) + + bits := make([]bool, MaxTreeLevels) + // Only one non-empty node indicated. + bits[0] = false + for i := 1; i < MaxTreeLevels; i++ { + bits[i] = true + } + + // Manually create invalid bytes. + var buf bytes.Buffer + // Write 2 nodes. + // numNodes = 2 + buf.Write([]byte{0x00, 0x02}) + // Node 1. + hash1 := node1.NodeHash() + buf.Write(hash1[:]) + // sum = 100 + buf.Write([]byte{0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x64}) + // Node 2. + hash2 := node2.NodeHash() + buf.Write(hash2[:]) + // sum = 200 + buf.Write([]byte{0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xC8}) + // Write bits (indicating only 1 non-empty + // node). 
+ bitsBytes := PackBits(bits) + buf.Write(bitsBytes) + + return buf.Bytes() + }(), + expectError: true, + errorMsg: "invalid compressed proof", + expectNumNodes: 0, + }, + { + name: "invalid node count - fewer nodes than expected", + input: func() []byte { + // Create a proof that claims to have 0 nodes + // but bits indicate 1. + bits := make([]bool, MaxTreeLevels) + // One non-empty node indicated. + bits[0] = false + // Another non-empty node indicated. + bits[1] = false + for i := 2; i < MaxTreeLevels; i++ { + bits[i] = true + } + + // Manually create invalid bytes. + var buf bytes.Buffer + // numNodes = 0, but bits expect 2. + buf.Write([]byte{0x00, 0x00}) + // Write bits. + bitsBytes := PackBits(bits) + buf.Write(bitsBytes) + + return buf.Bytes() + }(), + expectError: true, + errorMsg: "invalid compressed proof", + expectNumNodes: 0, + }, + { + name: "trailing data after valid proof", + input: func() []byte { + validBytes := compressedProofBytes(t, 1) + // Add extra bytes at the end. + return append(validBytes, 0xFF, 0xFF) + }(), + expectError: true, + errorMsg: "trailing data after compressed proof", + expectNumNodes: 0, + }, + { + name: "maximum valid nodes", + input: compressedProofBytes(t, MaxTreeLevels), + expectError: false, + expectNumNodes: MaxTreeLevels, + }, + } + + for idx := range testCases { + tc := testCases[idx] + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + proof, err := NewProofFromCompressedBytes(tc.input) + + if tc.expectError { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errorMsg) + + // Verify that the returned proof is the zero + // value. + var zeroProof Proof + require.Equal(t, zeroProof, proof) + + return + } + + require.NoError(t, err) + require.NotNil(t, proof.Nodes) + require.Len(t, proof.Nodes, MaxTreeLevels) + + // Count populated (non-empty) nodes. + populatedCount := 0 + for idx, node := range proof.Nodes { + // A node is populated if it doesn't match the + // corresponding EmptyTree node. + emptyTreeNode := EmptyTree[MaxTreeLevels-idx] + if node.NodeHash() != emptyTreeNode.NodeHash() { + populatedCount++ + } + } + require.Equal( + t, tc.expectNumNodes, populatedCount, + "expected %d populated nodes, got %d", + tc.expectNumNodes, populatedCount, + ) + + // Verify that we can compress the proof again and get + // similar bytes. + compressedAgain := proof.Compress() + require.NotNil(t, compressedAgain) + + // Verify that decompressing again yields the same + // proof. + proofAgain, err := compressedAgain.Decompress() + require.NoError(t, err) + require.NotNil(t, proofAgain) + + // Compare nodes (should be equal). + require.Len(t, proofAgain.Nodes, len(proof.Nodes)) + for i, node := range proof.Nodes { + isEqualNode := IsEqualNode( + node, proofAgain.Nodes[i], + ) + require.True(t, isEqualNode) + } + }) + } +} From fdd48ec5cb93a58001aa2aa7faa8a4cfde198239 Mon Sep 17 00:00:00 2001 From: ffranr Date: Fri, 29 Aug 2025 10:56:16 +0100 Subject: [PATCH 05/23] supplysync_rpc: add support for FetchSupplyCommit RPC endpoint This enables extending the supplyverifier syncer to support pulling supply commitments in a follow-up commit. 
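For reference, a caller-side sketch of how the new endpoint is driven
(variable names hypothetical, error handling elided): a call with no
spent outpoint maps to the VeryFirst locator, and each follow-up call
passes the outpoint spent by the commitment it wants:

    // Fetch the very first supply commitment for the asset group.
    res, err := syncer.FetchSupplyCommit(
            ctx, assetSpec, fn.None[wire.OutPoint](),
    )

    // Fetch the commitment that spent a known commitment outpoint,
    // allowing the commitment chain to be walked forward.
    next, err := syncer.FetchSupplyCommit(
            ctx, assetSpec, fn.Some(prevOutpoint),
    )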
---
 mssmt/node.go                      |  15 ++
 supplysync_rpc.go                  | 215 +++++++++++++++++++++++++++++
 universe/archive.go                |  15 ++
 universe/supplycommit/env.go       |  14 ++
 universe/supplycommit/interface.go |  57 ++++++++
 5 files changed, 316 insertions(+)
 create mode 100644 universe/supplycommit/interface.go

diff --git a/mssmt/node.go b/mssmt/node.go
index dbfeff6f0..e29b67f1e 100644
--- a/mssmt/node.go
+++ b/mssmt/node.go
@@ -4,6 +4,7 @@ import (
 	"crypto/sha256"
 	"encoding/binary"
 	"encoding/hex"
+	"fmt"
 )
 
 const (
@@ -23,6 +24,20 @@ var (
 // NodeHash represents the key of a MS-SMT node.
 type NodeHash [hashSize]byte
 
+// NewNodeHashFromBytes creates a new NodeHash from a byte slice.
+func NewNodeHashFromBytes(b []byte) (NodeHash, error) {
+	var zero NodeHash
+
+	if len(b) != hashSize {
+		return zero, fmt.Errorf("invalid hash size: %d", len(b))
+	}
+
+	var h NodeHash
+	copy(h[:], b)
+
+	return h, nil
+}
+
 // String returns a NodeHash as a hex-encoded string.
 func (k NodeHash) String() string {
 	return hex.EncodeToString(k[:])
diff --git a/supplysync_rpc.go b/supplysync_rpc.go
index 36babfbae..2cbbee4ae 100644
--- a/supplysync_rpc.go
+++ b/supplysync_rpc.go
@@ -8,6 +8,8 @@ import (
 	"github.com/btcsuite/btcd/wire"
 	"github.com/lightninglabs/taproot-assets/asset"
 	"github.com/lightninglabs/taproot-assets/fn"
+	"github.com/lightninglabs/taproot-assets/mssmt"
+	"github.com/lightninglabs/taproot-assets/proof"
 	"github.com/lightninglabs/taproot-assets/taprpc"
 	unirpc "github.com/lightninglabs/taproot-assets/taprpc/universerpc"
 	"github.com/lightninglabs/taproot-assets/universe"
@@ -112,6 +114,128 @@ func (r *RpcSupplySync) InsertSupplyCommit(ctx context.Context,
 	return nil
 }
 
+// FetchSupplyCommit fetches a supply commitment for a specific asset group
+// from the remote universe server.
+func (r *RpcSupplySync) FetchSupplyCommit(ctx context.Context,
+	assetSpec asset.Specifier,
+	spentCommitOutpoint fn.Option[wire.OutPoint]) (
+	supplycommit.FetchSupplyCommitResult, error) {
+
+	var zero supplycommit.FetchSupplyCommitResult
+
+	groupKey, err := assetSpec.UnwrapGroupKeyOrErr()
+	if err != nil {
+		return zero, fmt.Errorf("unable to unwrap group key: %w", err)
+	}
+
+	req := &unirpc.FetchSupplyCommitRequest{
+		GroupKey: &unirpc.FetchSupplyCommitRequest_GroupKeyBytes{
+			GroupKeyBytes: groupKey.SerializeCompressed(),
+		},
+		Locator: &unirpc.FetchSupplyCommitRequest_VeryFirst{
+			VeryFirst: true,
+		},
+	}
+
+	// If a spent commit outpoint is provided, use that to locate the next
+	// supply commitment.
+	spentCommitOutpoint.WhenSome(func(outpoint wire.OutPoint) {
+		// nolint: lll
+		req.Locator = &unirpc.FetchSupplyCommitRequest_SpentCommitOutpoint{
+			SpentCommitOutpoint: &taprpc.OutPoint{
+				Txid:        outpoint.Hash[:],
+				OutputIndex: outpoint.Index,
+			},
+		}
+	})
+
+	resp, err := r.conn.FetchSupplyCommit(ctx, req)
+	if err != nil {
+		return zero, fmt.Errorf("unable to fetch supply commitment: %w",
+			err)
+	}
+
+	// Unmarshal the chain data to get the root commitment.
+	rootCommitment, err := unmarshalSupplyCommitChainData(resp.ChainData)
+	if err != nil {
+		return zero, fmt.Errorf("unable to unmarshal root "+
+			"commitment: %w", err)
+	}
+
+	// Extract the chain proof from the response data.
+	chainProof, err := unmarshalChainProof(resp.ChainData)
+	if err != nil {
+		return zero, fmt.Errorf("unable to unmarshal chain proof: %w",
+			err)
+	}
+
+	// Set the spent commitment outpoint if provided in response.
+	if resp.SpentCommitmentOutpoint != nil {
+		spentOutpoint := wire.OutPoint{
+			Index: resp.SpentCommitmentOutpoint.OutputIndex,
+		}
+		copy(spentOutpoint.Hash[:], resp.SpentCommitmentOutpoint.Txid)
+		rootCommitment.SpentCommitment = fn.Some(spentOutpoint)
+	}
+
+	// Unmarshal the supply leaves.
+	supplyLeaves, err := unmarshalSupplyLeaves(
+		resp.IssuanceLeaves, resp.BurnLeaves, resp.IgnoreLeaves,
+	)
+	if err != nil {
+		return zero, fmt.Errorf("unable to unmarshal supply leaves: %w",
+			err)
+	}
+
+	// Convert spent commitment outpoint from RPC response to fn.Option.
+	var respSpentCommitOutpoint fn.Option[wire.OutPoint]
+	if resp.SpentCommitmentOutpoint != nil {
+		outpoint := wire.OutPoint{
+			Index: resp.SpentCommitmentOutpoint.OutputIndex,
+		}
+		copy(outpoint.Hash[:], resp.SpentCommitmentOutpoint.Txid)
+		respSpentCommitOutpoint = fn.Some(outpoint)
+	}
+
+	// Unmarshal RPC subtree roots.
+	issuanceSubtreeRoot, err := unmarshalSupplyCommitSubtreeRoot(
+		resp.IssuanceSubtreeRoot,
+	)
+	if err != nil {
+		return zero, fmt.Errorf("unable to unmarshal issuance subtree "+
+			"root: %w", err)
+	}
+
+	burnSubtreeRoot, err := unmarshalSupplyCommitSubtreeRoot(
+		resp.BurnSubtreeRoot,
+	)
+	if err != nil {
+		return zero, fmt.Errorf("unable to unmarshal burn subtree "+
+			"root: %w", err)
+	}
+
+	ignoreSubtreeRoot, err := unmarshalSupplyCommitSubtreeRoot(
+		resp.IgnoreSubtreeRoot,
+	)
+	if err != nil {
+		return zero, fmt.Errorf("unable to unmarshal ignore subtree "+
+			"root: %w", err)
+	}
+
+	return supplycommit.FetchSupplyCommitResult{
+		RootCommitment:  *rootCommitment,
+		SupplyLeaves:    *supplyLeaves,
+		ChainProof:      chainProof,
+		TxChainFeesSats: resp.TxChainFeesSats,
+
+		IssuanceSubtreeRoot: issuanceSubtreeRoot,
+		BurnSubtreeRoot:     burnSubtreeRoot,
+		IgnoreSubtreeRoot:   ignoreSubtreeRoot,
+
+		SpentCommitmentOutpoint: respSpentCommitOutpoint,
+	}, nil
+}
+
 // Close closes the RPC connection to the universe server.
 func (r *RpcSupplySync) Close() error {
 	if r.conn != nil && r.conn.ClientConn != nil {
@@ -174,3 +298,94 @@ func marshalSupplyCommitChainData(
 
 	return rpcChainData, nil
 }
+
+// unmarshalChainProof converts an RPC SupplyCommitChainData into
+// a supplycommit.ChainProof.
+func unmarshalChainProof(
+	rpcData *unirpc.SupplyCommitChainData) (supplycommit.ChainProof,
+	error) {
+
+	var zero supplycommit.ChainProof
+
+	if rpcData == nil {
+		return zero, fmt.Errorf("supply commit chain data is nil")
+	}
+
+	var blockHeader wire.BlockHeader
+	err := blockHeader.Deserialize(bytes.NewReader(rpcData.BlockHeader))
+	if err != nil {
+		return zero, fmt.Errorf("unable to deserialize block "+
+			"header: %w", err)
+	}
+
+	var merkleProof proof.TxMerkleProof
+	err = merkleProof.Decode(bytes.NewReader(rpcData.TxBlockMerkleProof))
+	if err != nil {
+		return zero, fmt.Errorf("unable to decode merkle proof: %w",
+			err)
+	}
+
+	return supplycommit.ChainProof{
+		Header:      blockHeader,
+		BlockHeight: rpcData.BlockHeight,
+		MerkleProof: merkleProof,
+		TxIndex:     rpcData.TxIndex,
+	}, nil
+}
+
+// unmarshalSupplyCommitSubtreeRoot converts an RPC SubtreeRootProof
+// into a domain-specific SubtreeRootProof.
+func unmarshalSupplyCommitSubtreeRoot(rpcRoot *unirpc.SupplyCommitSubtreeRoot) (
+	supplycommit.SubtreeRootProof, error) {
+
+	var zero supplycommit.SubtreeRootProof
+
+	if rpcRoot == nil {
+		return zero, nil
+	}
+
+	// Convert the RPC string type to SupplySubTree enum.
+	subTreeType, err := supplycommit.NewSubtreeTypeFromStr(rpcRoot.Type)
+	if err != nil {
+		return zero, fmt.Errorf("unknown subtree type: %w", err)
+	}
+
+	// Convert the RPC MerkleSumNode to our domain BranchNode.
+	if rpcRoot.RootNode == nil {
+		return zero, fmt.Errorf("supply root node is nil")
+	}
+
+	// Create a computed branch from the RPC node data.
+	nodeHash, err := mssmt.NewNodeHashFromBytes(rpcRoot.RootNode.RootHash)
+	if err != nil {
+		return zero, fmt.Errorf("unable to parse node hash: %w", err)
+	}
+
+	rootNode := mssmt.NewComputedBranch(
+		nodeHash, uint64(rpcRoot.RootNode.RootSum),
+	)
+
+	// Convert the leaf key byte slice to a UniverseKey.
+	leafKey, err := universe.NewUniverseKeyFromBytes(
+		rpcRoot.SupplyTreeLeafKey,
+	)
+	if err != nil {
+		return zero, fmt.Errorf("unable to parse leaf key: %w", err)
+	}
+
+	// Unmarshal the compressed inclusion proof.
+	supplyTreeInclusionProof, err := mssmt.NewProofFromCompressedBytes(
+		rpcRoot.SupplyTreeInclusionProof,
+	)
+	if err != nil {
+		return zero, fmt.Errorf("unable to decompress inclusion "+
+			"proof: %w", err)
+	}
+
+	return supplycommit.SubtreeRootProof{
+		Type:                     subTreeType,
+		RootNode:                 *rootNode,
+		SupplyTreeLeafKey:        leafKey,
+		SupplyTreeInclusionProof: supplyTreeInclusionProof,
+	}, nil
+}
diff --git a/universe/archive.go b/universe/archive.go
index cc7a2b893..2e1835ad0 100644
--- a/universe/archive.go
+++ b/universe/archive.go
@@ -570,6 +570,21 @@ func (a *Archive) UpsertProofLeafBatch(ctx context.Context,
 // UniverseKey represents the key used to locate an item within a universe.
 type UniverseKey [32]byte
 
+// NewUniverseKeyFromBytes creates a new universe key from the given byte slice.
+func NewUniverseKeyFromBytes(b []byte) (UniverseKey, error) {
+	var zero UniverseKey
+
+	if len(b) != 32 {
+		return zero, fmt.Errorf("invalid length for universe key, "+
+			"expected 32 got %d", len(b))
+	}
+
+	var key UniverseKey
+	copy(key[:], b)
+
+	return key, nil
+}
+
 // getPrevAssetSnapshot returns the previous asset snapshot for the passed
 // proof. If the proof is a genesis proof, then nil is returned.
 func (a *Archive) getPrevAssetSnapshot(ctx context.Context,
diff --git a/universe/supplycommit/env.go b/universe/supplycommit/env.go
index 05e06738c..3b3354e41 100644
--- a/universe/supplycommit/env.go
+++ b/universe/supplycommit/env.go
@@ -56,6 +56,20 @@ const (
 	IgnoreTreeType
 )
 
+// NewSubtreeTypeFromStr returns the SupplySubTree type from a string.
+func NewSubtreeTypeFromStr(s string) (SupplySubTree, error) {
+	switch s {
+	case "mint_supply":
+		return MintTreeType, nil
+	case "burn":
+		return BurnTreeType, nil
+	case "ignore":
+		return IgnoreTreeType, nil
+	default:
+		return 0, fmt.Errorf("unknown supply subtree: %s", s)
+	}
+}
+
 // String returns the string representation of the supply sub tree.
 func (s SupplySubTree) String() string {
 	switch s {
diff --git a/universe/supplycommit/interface.go b/universe/supplycommit/interface.go
new file mode 100644
index 000000000..9f43b636c
--- /dev/null
+++ b/universe/supplycommit/interface.go
@@ -0,0 +1,57 @@
+package supplycommit
+
+import (
+	"github.com/btcsuite/btcd/wire"
+	"github.com/lightninglabs/taproot-assets/fn"
+	"github.com/lightninglabs/taproot-assets/mssmt"
+	"github.com/lightninglabs/taproot-assets/universe"
+)
+
+// SubtreeRootProof represents the root of a supply commit subtree with its main
+// supply tree inclusion proof.
+type SubtreeRootProof struct {
+	// Type indicates the type of the supply commit subtree.
+	Type SupplySubTree
+
+	// RootNode is the root node of the supply commit subtree.
+	RootNode mssmt.BranchNode
+
+	// SupplyTreeLeafKey locates the subtree leaf node in the supply commit
+	// tree.
+	SupplyTreeLeafKey universe.UniverseKey
+
+	// SupplyTreeInclusionProof proves inclusion of the subtree root in the
+	// supply tree.
+	SupplyTreeInclusionProof mssmt.Proof
+}
+
+// FetchSupplyCommitResult represents the complete data returned from a
+// FetchSupplyCommit RPC call, containing all fields from the RPC response.
+type FetchSupplyCommitResult struct {
+	// RootCommitment contains the commitment transaction and output data.
+	RootCommitment RootCommitment
+
+	// SupplyLeaves contains the issuance, burn, and ignore leaves.
+	SupplyLeaves SupplyLeaves
+
+	// ChainProof contains the block header and merkle proof.
+	ChainProof ChainProof
+
+	// TxChainFeesSats is the total number of satoshis in on-chain fees
+	// paid by the supply commitment transaction.
+	TxChainFeesSats int64
+
+	// IssuanceSubtreeRoot is the root of the issuance tree for the asset.
+	IssuanceSubtreeRoot SubtreeRootProof
+
+	// BurnSubtreeRoot is the root of the burn tree for the asset.
+	BurnSubtreeRoot SubtreeRootProof
+
+	// IgnoreSubtreeRoot is the root of the ignore tree for the asset.
+	IgnoreSubtreeRoot SubtreeRootProof
+
+	// SpentCommitmentOutpoint is the outpoint of the previous commitment
+	// that this new commitment is spending. This is None for the very
+	// first supply commitment of a grouped asset.
+	SpentCommitmentOutpoint fn.Option[wire.OutPoint]
+}

From bdebf6bec3278279580cecc17985e90a0d144e1f Mon Sep 17 00:00:00 2001
From: ffranr
Date: Fri, 29 Aug 2025 11:02:37 +0100
Subject: [PATCH 06/23] supplyverifier: add supply commit pull functionality to syncer

Introduce the PullSupplyCommitment method, which calls the
FetchSupplyCommit RPC endpoint. Support for calling this endpoint from
the syncer was added in the previous commit.
---
 universe/supplyverifier/syncer.go | 167 +++++++++++++++++++++++++++++-
 1 file changed, 165 insertions(+), 2 deletions(-)

diff --git a/universe/supplyverifier/syncer.go b/universe/supplyverifier/syncer.go
index 1a24ea591..e4d9a23b7 100644
--- a/universe/supplyverifier/syncer.go
+++ b/universe/supplyverifier/syncer.go
@@ -4,13 +4,21 @@ import (
 	"context"
 	"fmt"
 	"net/url"
+	"sync"
+	"time"
 
+	"github.com/btcsuite/btcd/wire"
 	"github.com/lightninglabs/taproot-assets/asset"
 	"github.com/lightninglabs/taproot-assets/fn"
 	"github.com/lightninglabs/taproot-assets/universe"
 	"github.com/lightninglabs/taproot-assets/universe/supplycommit"
 )
 
+const (
+	// defaultPullTimeout is the default timeout for a supply commitment
+	// pull operation.
+	defaultPullTimeout = 30 * time.Second
+)
+
 // UniverseClient is an interface that represents a client connection to a
 // remote universe server.
 type UniverseClient interface {
@@ -21,6 +29,12 @@
 		updateLeaves supplycommit.SupplyLeaves,
 		chainProof supplycommit.ChainProof) error
 
+	// FetchSupplyCommit fetches a supply commitment for a specific
+	// asset group from the remote universe server.
+	FetchSupplyCommit(ctx context.Context, assetSpec asset.Specifier,
+		spentCommitOutpoint fn.Option[wire.OutPoint]) (
+		supplycommit.FetchSupplyCommitResult, error)
+
 	// Close closes the fetcher and cleans up any resources.
 	Close() error
 }
 
@@ -214,11 +228,160 @@ func (s *SupplySyncer) PushSupplyCommitment(ctx context.Context,
 	}
 
 	errorMap := make(map[string]error)
-	for idx, fetchErr := range pushErrs {
+	for idx, pushErr := range pushErrs {
 		serverAddr := targetAddrs[idx]
 		hostStr := serverAddr.HostStr()
-		errorMap[hostStr] = fetchErr
+		errorMap[hostStr] = pushErr
 	}
 
 	return errorMap, nil
 }
+
+// pullUniServer fetches the supply commitment from a specific universe server.
+func (s *SupplySyncer) pullUniServer(ctx context.Context,
+	assetSpec asset.Specifier, spentCommitOutpoint fn.Option[wire.OutPoint],
+	serverAddr universe.ServerAddr) (supplycommit.FetchSupplyCommitResult,
+	error) {
+
+	var zero supplycommit.FetchSupplyCommitResult
+
+	// Create a client for the specific universe server address.
+	client, err := s.cfg.ClientFactory(serverAddr)
+	if err != nil {
+		return zero, fmt.Errorf("unable to create universe client: %w",
+			err)
+	}
+
+	// Ensure the client is properly closed when we're done.
+	defer func() {
+		if closeErr := client.Close(); closeErr != nil {
+			log.Errorf("Unable to close supply syncer pull "+
+				"universe client: %v", closeErr)
+		}
+	}()
+
+	result, err := client.FetchSupplyCommit(
+		ctx, assetSpec, spentCommitOutpoint,
+	)
+	if err != nil {
+		return zero, fmt.Errorf("unable to fetch supply commitment: %w",
+			err)
+	}
+
+	return result, nil
+}
+
+// SupplyCommitPullResult represents the result of a supply commitment pull
+// operation across multiple universe servers.
+type SupplyCommitPullResult struct {
+	// FetchResult contains the complete fetched supply commitment data.
+	FetchResult fn.Option[supplycommit.FetchSupplyCommitResult]
+
+	// ErrorMap contains errors encountered while pulling from each server,
+	// keyed by server host string. If empty, all pulls succeeded.
+	ErrorMap map[string]error
+}
+
+// PullSupplyCommitment fetches a supply commitment from remote universe
+// servers. This function attempts to fetch from all servers in parallel.
+//
+// It returns a SupplyCommitPullResult containing the fetched data and a map
+// of per-server errors; the error return value is reserved for internal
+// failures. If at least one server succeeds, the result will contain the
+// commitment data. If all servers fail, the ErrorMap will contain all the
+// errors and the FetchResult will be None.
+//
+// NOTE: This function must be thread safe.
+func (s *SupplySyncer) PullSupplyCommitment(ctx context.Context,
+	assetSpec asset.Specifier, spentCommitOutpoint fn.Option[wire.OutPoint],
+	canonicalUniverses []url.URL) (SupplyCommitPullResult, error) {
+
+	var zero SupplyCommitPullResult
+
+	targetAddrs, err := s.fetchServerAddrs(ctx, canonicalUniverses)
+	if err != nil {
+		// This is an internal error that prevents the operation from
+		// proceeding.
+		return zero, fmt.Errorf("unable to fetch target universe "+
+			"server addresses: %w", err)
+	}
+
+	// Pull the supply commitment from all target universe servers in
+	// parallel. Store both errors and successful results.
+	var muResults sync.Mutex
+	results := make(map[string]supplycommit.FetchSupplyCommitResult)
+
+	// Specify context timeout for the entire pull operation.
+	ctxPull, cancel := context.WithTimeout(ctx, defaultPullTimeout)
+	defer cancel()
+
+	pullErrs, err := fn.ParSliceErrCollect(
+		ctxPull, targetAddrs, func(ctx context.Context,
+			serverAddr universe.ServerAddr) error {
+
+			// Pull the supply commitment from the universe server.
+			result, err := s.pullUniServer(
+				ctx, assetSpec, spentCommitOutpoint, serverAddr,
+			)
+			if err != nil {
+				return fmt.Errorf("unable to pull supply "+
+					"commitment (server_addr_id=%d, "+
+					"server_addr_host_str=%s): %w",
+					serverAddr.ID, serverAddr.HostStr(),
+					err)
+			}
+
+			muResults.Lock()
+			results[serverAddr.HostStr()] = result
+			muResults.Unlock()
+
+			return nil
+		},
+	)
+	if err != nil {
+		// This should not happen with ParSliceErrCollect, but handle it
+		// as an internal error.
+		return zero, fmt.Errorf("unable to pull supply commitment: %w",
+			err)
+	}
+
+	// Report results: log server address and supply tree root.
+	//
+	// If the supply commitment that was pulled fails verification later,
+	// we can use this log to trace back to the server it came from.
+	for serverAddr, res := range results {
+		// Format the spent outpoint if present, otherwise empty string.
+		spentOutpointStr := fn.MapOptionZ(
+			spentCommitOutpoint, func(op wire.OutPoint) string {
+				return op.String()
+			},
+		)
+
+		log.Infof("Pulled supply commitment from server "+
+			"(server_addr=%s, asset=%s, supply_tree_root=%s, "+
+			"spent_outpoint=%s)", serverAddr, assetSpec.String(),
+			res.RootCommitment.SupplyRoot.NodeHash().String(),
+			spentOutpointStr)
+	}
+
+	// Return one successful result, if available, as the final outcome.
+	var finalResult *supplycommit.FetchSupplyCommitResult
+	for _, res := range results {
+		finalResult = &res
+		break
+	}
+
+	// Build a map from server addresses to their corresponding errors.
+	errorMap := make(map[string]error)
+	for idx, pullErr := range pullErrs {
+		serverAddr := targetAddrs[idx]
+		hostStr := serverAddr.HostStr()
+		if pullErr != nil {
+			errorMap[hostStr] = pullErr
+		}
+	}
+
+	return SupplyCommitPullResult{
+		FetchResult: fn.MaybeSome(finalResult),
+		ErrorMap:    errorMap,
+	}, nil
+}

From f984d6a367b148b28c0a819a8fd67784cabde22e Mon Sep 17 00:00:00 2001
From: ffranr
Date: Fri, 29 Aug 2025 13:15:42 +0100
Subject: [PATCH 07/23] supplycommit: refactor by introducing startAssetSM method

Consolidate all state machine startup logic into a new method called
startAssetSM to improve code clarity. Add IsRunning checks to ensure
the state machine is active before returning it from the cache.
---
 universe/supplycommit/manager.go | 104 +++++++++++++++++++------------
 1 file changed, 64 insertions(+), 40 deletions(-)

diff --git a/universe/supplycommit/manager.go b/universe/supplycommit/manager.go
index c3859091d..a81eafeb2 100644
--- a/universe/supplycommit/manager.go
+++ b/universe/supplycommit/manager.go
@@ -191,46 +191,11 @@ func (m *Manager) ensureSupplyCommitSupport(ctx context.Context,
 	return nil
 }
 
-// fetchStateMachine retrieves a state machine from the cache or creates a
-// new one if it doesn't exist. If a new state machine is created, it is also
-// started.
-func (m *Manager) fetchStateMachine(
+// startAssetSM creates and starts a new supply commitment state
+// machine for the given asset specifier.
+func (m *Manager) startAssetSM(ctx context.Context,
 	assetSpec asset.Specifier) (*StateMachine, error) {
 
-	groupKey, err := assetSpec.UnwrapGroupKeyOrErr()
-	if err != nil {
-		return nil, fmt.Errorf("asset specifier missing group key: %w",
-			err)
-	}
-
-	// Check if the state machine for the asset group already exists in the
-	// cache.
-	sm, ok := m.smCache.Get(*groupKey)
-	if ok {
-		return sm, nil
-	}
-
-	// If the state machine is not found, create a new one.
-	//
-	// Before we can create a state machine, we need to ensure that the
-	// asset group supports supply commitments. If it doesn't, then we
-	// return an error.
-	ctx, cancel := m.WithCtxQuitNoTimeout()
-	defer cancel()
-
-	metaReveal, err := FetchLatestAssetMetadata(
-		ctx, m.cfg.AssetLookup, assetSpec,
-	)
-	if err != nil {
-		return nil, fmt.Errorf("faild to fetch asset meta: %w", err)
-	}
-
-	err = m.ensureSupplyCommitSupport(ctx, metaReveal)
-	if err != nil {
-		return nil, fmt.Errorf("failed to ensure supply commit "+
-			"support for asset: %w", err)
-	}
-
 	env := &Environment{
 		AssetSpec: assetSpec,
 		TreeView:  m.cfg.TreeView,
@@ -270,6 +235,12 @@
 	smCtx, _ := m.WithCtxQuitNoTimeout()
 	newSm.Start(smCtx)
 
+	// Assert that the state machine is running. Start should block until
+	// the state machine is running.
+	if !newSm.IsRunning() {
+		return nil, fmt.Errorf("state machine unexpectedly not running")
+	}
+
 	// If specific initial states are provided, we send the corresponding
 	// events to the state machine to ensure it begins ticking as expected.
 	switch initialState.(type) {
@@ -287,11 +258,64 @@
 		newSm.SendEvent(ctx, &FinalizeEvent{})
 	}
 
-	m.smCache.Set(*groupKey, &newSm)
-
 	return &newSm, nil
 }
 
+// fetchStateMachine retrieves a state machine from the cache or creates a
+// new one if it doesn't exist. If a new state machine is created, it is also
+// started.
+func (m *Manager) fetchStateMachine(
+	assetSpec asset.Specifier) (*StateMachine, error) {
+
+	groupKey, err := assetSpec.UnwrapGroupKeyOrErr()
+	if err != nil {
+		return nil, fmt.Errorf("asset specifier missing group key: %w",
+			err)
+	}
+
+	// Check if the state machine for the asset group already exists in the
+	// cache.
+	sm, ok := m.smCache.Get(*groupKey)
+	if ok {
+		// If the state machine is found and is running, return it.
+		if sm.IsRunning() {
+			return sm, nil
+		}
+
+		// If the state machine exists but is not running, replace it in
+		// the cache with a new running instance.
+	}
+
+	// Before we can create a state machine, we need to ensure that the
+	// asset group supports supply commitments. If it doesn't, then we
+	// return an error.
+	ctx, cancel := m.WithCtxQuitNoTimeout()
+	defer cancel()
+
+	metaReveal, err := FetchLatestAssetMetadata(
+		ctx, m.cfg.AssetLookup, assetSpec,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch asset meta: %w", err)
+	}
+
+	err = m.ensureSupplyCommitSupport(ctx, metaReveal)
+	if err != nil {
+		return nil, fmt.Errorf("failed to ensure supply commit "+
+			"support for asset: %w", err)
+	}
+
+	// Start the state machine and add it to the cache.
+	newSm, err := m.startAssetSM(ctx, assetSpec)
+	if err != nil {
+		return nil, fmt.Errorf("unable to start state machine: %w", err)
+	}
+
+	m.smCache.Set(*groupKey, newSm)
+
+	return newSm, nil
+}
+
 // SendEvent sends an event to the state machine associated with the given asset
 // specifier. If a state machine for the asset group does not exist, it will be
 // created and started.

From 8d13b75a0ba0c77e6e5fe629a0dd30d864556780 Mon Sep 17 00:00:00 2001
From: ffranr
Date: Fri, 29 Aug 2025 15:08:48 +0100
Subject: [PATCH 08/23] supplyverifier: refactor by introducing startAssetSM method

Consolidate all state machine startup logic into a new method called
startAssetSM to improve code clarity. Add IsRunning checks to ensure
the state machine is active before returning it from the cache.
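A condensed view of the resulting lookup pattern (names as they appear
in the diff below; the cache now only hands back live machines and
replaces stale entries):

	sm, ok := m.smCache.Get(*groupKey)
	if ok && sm.IsRunning() {
		return sm, nil
	}

	// Otherwise fall through: start a fresh machine via startAssetSM
	// and overwrite the stale cache entry.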
---
 universe/supplyverifier/manager.go | 77 +++++++++++++++++++++---------
 1 file changed, 54 insertions(+), 23 deletions(-)

diff --git a/universe/supplyverifier/manager.go b/universe/supplyverifier/manager.go
index f88c9233f..fd9a5bac6 100644
--- a/universe/supplyverifier/manager.go
+++ b/universe/supplyverifier/manager.go
@@ -153,24 +153,10 @@ func (m *Manager) Stop() error {
 	return nil
 }
 
-// fetchStateMachine retrieves a state machine from the cache or creates a
-// new one if it doesn't exist. If a new state machine is created, it is also
-// started.
-func (m *Manager) fetchStateMachine(assetSpec asset.Specifier) (*StateMachine,
-	error) {
-
-	groupKey, err := assetSpec.UnwrapGroupKeyOrErr()
-	if err != nil {
-		return nil, fmt.Errorf("asset specifier missing group key: %w",
-			err)
-	}
-
-	// Check if the state machine for the asset group already exists in the
-	// cache.
-	sm, ok := m.smCache.Get(*groupKey)
-	if ok {
-		return sm, nil
-	}
+// startAssetSM creates and starts a new supply commitment state machine for the
+// given asset specifier.
+func (m *Manager) startAssetSM(ctx context.Context,
+	assetSpec asset.Specifier) (*StateMachine, error) {
 
 	// If the state machine is not found, create a new one.
 	env := &Environment{
@@ -183,9 +169,6 @@ func (m *Manager) fetchStateMachine(assetSpec asset.Specifier) (*StateMachine,
 
 	// Before we start the state machine, we'll need to fetch the current
 	// state from disk, to see if we need to emit any new events.
-	ctx, cancel := m.WithCtxQuitNoTimeout()
-	defer cancel()
-
 	initialState, err := m.cfg.StateLog.FetchState(ctx, assetSpec)
 	if err != nil {
 		return nil, fmt.Errorf("unable to fetch current state: %w", err)
@@ -208,15 +191,63 @@
 	smCtx, _ := m.WithCtxQuitNoTimeout()
 	newSm.Start(smCtx)
 
+	// Assert that the state machine is running. Start should block until
+	// the state machine is running.
+	if !newSm.IsRunning() {
+		return nil, fmt.Errorf("state machine unexpectedly not running")
+	}
+
 	// For supply verifier, we always start with an InitEvent to begin
 	// the verification process.
 	newSm.SendEvent(ctx, &InitEvent{})
 
-	m.smCache.Set(*groupKey, &newSm)
-
 	return &newSm, nil
 }
 
+// fetchStateMachine retrieves a state machine from the cache or creates a
+// new one if it doesn't exist. If a new state machine is created, it is also
+// started.
+func (m *Manager) fetchStateMachine(assetSpec asset.Specifier) (*StateMachine,
+	error) {
+
+	groupKey, err := assetSpec.UnwrapGroupKeyOrErr()
+	if err != nil {
+		return nil, fmt.Errorf("asset specifier missing group key: %w",
+			err)
+	}
+
+	// Check if the state machine for the asset group already exists in the
+	// cache.
+	sm, ok := m.smCache.Get(*groupKey)
+	if ok {
+		// If the state machine is found and is running, return it.
+		if sm.IsRunning() {
+			return sm, nil
+		}
+
+		// If the state machine exists but is not running, replace it in
+		// the cache with a new running instance.
+	}
+
+	ctx, cancel := m.WithCtxQuitNoTimeout()
+	defer cancel()
+
+	// TODO(ffranr): Check that the asset group supports supply commitments
+	// and that this node does not create supply commitments for the asset
+	// group (i.e. it does not own the delegation key). We don't want to
+	// run a verifier state machine for an asset group supply commitment
+	// that we issue ourselves.
+
+	newSm, err := m.startAssetSM(ctx, assetSpec)
+	if err != nil {
+		return nil, fmt.Errorf("unable to start state machine: %w", err)
+	}
+
+	m.smCache.Set(*groupKey, newSm)
+
+	return newSm, nil
+}
+
 // InsertSupplyCommit stores a verified supply commitment for the given asset
 // group in the node's local database.
 func (m *Manager) InsertSupplyCommit(ctx context.Context,

From f102c409963fcc093062bc637b272e3489546b51 Mon Sep 17 00:00:00 2001
From: ffranr
Date: Fri, 29 Aug 2025 16:37:56 +0100
Subject: [PATCH 09/23] tapdb: add TapAddressBook.FetchSupplyCommitAssets

Add a method that returns all asset groups with supply commitment
enabled. This will be used to set up supply verifier state machines for
each eligible asset group at `tapd` startup, which enables on-chain
spend watching from the start and ensures proper setup after a reboot.
---
 tapdb/addrs.go | 99 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 99 insertions(+)

diff --git a/tapdb/addrs.go b/tapdb/addrs.go
index dae73d521..bf283d7da 100644
--- a/tapdb/addrs.go
+++ b/tapdb/addrs.go
@@ -233,6 +233,9 @@ type AddrBook interface {
 	// database.
 	FetchAllAssetMeta(ctx context.Context) ([]AllAssetMetaRow, error)
 
+	// FetchGroupedAssets fetches all assets with non-nil group keys.
+	FetchGroupedAssets(ctx context.Context) ([]RawGroupedAsset, error)
+
 	// QueryLastEventHeight queries the last event height for a given
 	// address version.
 	QueryLastEventHeight(ctx context.Context,
@@ -1637,6 +1640,102 @@ func (t *TapAddressBook) FetchInternalKeyLocator(ctx context.Context,
 	return keyLoc, nil
 }
 
+// FetchSupplyCommitAssets fetches all assets with non-nil group keys that could
+// potentially be involved in supply commitments.
+func (t *TapAddressBook) FetchSupplyCommitAssets(ctx context.Context,
+	localControlled bool) ([]btcec.PublicKey, error) {
+
+	var (
+		readOpts       = NewAddrBookReadTx()
+		assetGroupKeys []btcec.PublicKey
+	)
+
+	err := t.db.ExecTx(ctx, &readOpts, func(db AddrBook) error {
+		// Fetch all grouped assets from the database.
+		dbAssets, err := db.FetchGroupedAssets(ctx)
+		if err != nil {
+			return err
+		}
+
+		// Convert to our simplified format.
+		assetGroupKeys = make([]btcec.PublicKey, 0, len(dbAssets))
+		for idx := range dbAssets {
+			dbAsset := dbAssets[idx]
+
+			groupKey, err := btcec.ParsePubKey(
+				dbAsset.TweakedGroupKey,
+			)
+			if err != nil {
+				return fmt.Errorf("unable to parse group "+
+					"key: %w", err)
+			}
+
+			// Get asset metadata for this group to check if it
+			// supports supply commitments.
+			metaRow, err := db.FetchAssetMetaForAsset(
+				ctx, dbAsset.AssetID,
+			)
+			if err != nil {
+				// If metadata not found, skip this asset group
+				// as it doesn't support supply commitments.
+				continue
+			}
+
+			// Check if the asset group supports supply commitments.
+			assetMetaRow := metaRow.AssetsMetum
+			if !assetMetaRow.MetaUniverseCommitments.Valid ||
+				!assetMetaRow.MetaUniverseCommitments.Bool {
+
+				continue
+			}
+
+			// Check if a delegation key is present (required for
+			// supply commitments).
+			if len(metaRow.AssetsMetum.MetaDelegationKey) == 0 {
+				continue
+			}
+
+			// Parse delegation key from metadata.
+			delegationPubKey, err := btcec.ParsePubKey(
+				metaRow.AssetsMetum.MetaDelegationKey,
+			)
+			if err != nil {
+				continue
+			}
+
+			// Filter based on localControlled parameter:
+			// - If localControlled=true: only return assets where
+			//   we own the delegation key
+			// - If localControlled=false: only return assets where
+			//   we DON'T own the delegation key
+			_, err = db.FetchInternalKeyLocator(
+				ctx, delegationPubKey.SerializeCompressed(),
+			)
+			weOwnDelegationKey := err == nil
+
+			if localControlled && !weOwnDelegationKey {
+				// We want assets under local control, but we do
+				// not own this one.
+				continue
+			}
+			if !localControlled && weOwnDelegationKey {
+				// We want assets not under local control, but
+				// we own this one.
+				continue
+			}
+
+			assetGroupKeys = append(assetGroupKeys, *groupKey)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return assetGroupKeys, nil
+}
+
 // A set of compile-time assertions to ensure that TapAddressBook meets the
 // address.Storage and address.EventStorage interface.
 var _ address.Storage = (*TapAddressBook)(nil)

From 4215821db79efb8e333959b21c595fcce55f2372 Mon Sep 17 00:00:00 2001
From: ffranr
Date: Mon, 1 Sep 2025 11:34:51 +0100
Subject: [PATCH 10/23] tapdb: add SQL query QueryLatestSupplyCommitment

Add a new SQL query to fetch the latest stored supply commitment based
on the highest block height.
---
 tapdb/sqlc/querier.go                |  1 +
 tapdb/sqlc/queries/supply_commit.sql |  9 +++++++
 tapdb/sqlc/supply_commit.sql.go      | 36 ++++++++++++++++++++++++++++
 tapdb/supply_commit.go               |  5 ++++
 4 files changed, 51 insertions(+)

diff --git a/tapdb/sqlc/querier.go b/tapdb/sqlc/querier.go
index f74c59523..4de28028e 100644
--- a/tapdb/sqlc/querier.go
+++ b/tapdb/sqlc/querier.go
@@ -192,6 +192,7 @@ type Querier interface {
 	QueryFederationProofSyncLog(ctx context.Context, arg QueryFederationProofSyncLogParams) ([]QueryFederationProofSyncLogRow, error)
 	QueryFederationUniSyncConfigs(ctx context.Context) ([]QueryFederationUniSyncConfigsRow, error)
 	QueryLastEventHeight(ctx context.Context, version int16) (int64, error)
+	QueryLatestSupplyCommitment(ctx context.Context, groupKey []byte) (QueryLatestSupplyCommitmentRow, error)
 	QueryMultiverseLeaves(ctx context.Context, arg QueryMultiverseLeavesParams) ([]QueryMultiverseLeavesRow, error)
 	QueryPassiveAssets(ctx context.Context, transferID int64) ([]QueryPassiveAssetsRow, error)
 	QueryPendingSupplyCommitTransition(ctx context.Context, groupKey []byte) (QueryPendingSupplyCommitTransitionRow, error)
diff --git a/tapdb/sqlc/queries/supply_commit.sql b/tapdb/sqlc/queries/supply_commit.sql
index b5c0eec8b..084ff0ce4 100644
--- a/tapdb/sqlc/queries/supply_commit.sql
+++ b/tapdb/sqlc/queries/supply_commit.sql
@@ -173,6 +173,15 @@ FROM supply_commitments AS sc
 WHERE sc.spent_commitment IS NULL
     AND sc.group_key = @group_key;
 
+-- name: QueryLatestSupplyCommitment :one
+SELECT sqlc.embed(sc), ct.tx_index
+FROM supply_commitments AS sc
+JOIN chain_txns AS ct
+    ON sc.chain_txn_id = ct.txn_id
+WHERE sc.group_key = @group_key
+ORDER BY ct.block_height DESC
+    LIMIT 1;
+
 -- name: QuerySupplyCommitmentOutpoint :one
 SELECT ct.txid, sc.output_index
 FROM supply_commitments AS sc
diff --git a/tapdb/sqlc/supply_commit.sql.go b/tapdb/sqlc/supply_commit.sql.go
index 960aa28ff..c61d117b8 100644
--- a/tapdb/sqlc/supply_commit.sql.go
+++ b/tapdb/sqlc/supply_commit.sql.go
@@ -452,6 +452,42 @@ func (q *Queries) QueryExistingPendingTransition(ctx context.Context, groupKey [
 	return transition_id, err
 }
 
+const QueryLatestSupplyCommitment = `-- name: QueryLatestSupplyCommitment :one
+SELECT sc.commit_id, sc.group_key, sc.chain_txn_id, sc.output_index, sc.internal_key_id, sc.output_key, sc.block_header, sc.block_height, sc.merkle_proof, sc.supply_root_hash, sc.supply_root_sum, sc.spent_commitment, ct.tx_index
+FROM supply_commitments AS sc
+JOIN chain_txns AS ct
+    ON sc.chain_txn_id = ct.txn_id
+WHERE sc.group_key = $1
+ORDER BY ct.block_height DESC
+    LIMIT 1
+`
+
+type QueryLatestSupplyCommitmentRow struct {
+	SupplyCommitment SupplyCommitment
+	TxIndex          sql.NullInt32
+}
+
+func (q *Queries) QueryLatestSupplyCommitment(ctx context.Context, groupKey []byte) (QueryLatestSupplyCommitmentRow, error) {
+	row := q.db.QueryRowContext(ctx, QueryLatestSupplyCommitment, groupKey)
+	var i QueryLatestSupplyCommitmentRow
+	err := row.Scan(
+		&i.SupplyCommitment.CommitID,
+		&i.SupplyCommitment.GroupKey,
+		&i.SupplyCommitment.ChainTxnID,
+		&i.SupplyCommitment.OutputIndex,
+		&i.SupplyCommitment.InternalKeyID,
+		&i.SupplyCommitment.OutputKey,
+		&i.SupplyCommitment.BlockHeader,
+		&i.SupplyCommitment.BlockHeight,
+		&i.SupplyCommitment.MerkleProof,
+		&i.SupplyCommitment.SupplyRootHash,
+		&i.SupplyCommitment.SupplyRootSum,
+		&i.SupplyCommitment.SpentCommitment,
+		&i.TxIndex,
+	)
+	return i, err
+}
+
 const QueryPendingSupplyCommitTransition = `-- name: QueryPendingSupplyCommitTransition :one
 WITH target_machine AS (
     SELECT group_key
diff --git a/tapdb/supply_commit.go b/tapdb/supply_commit.go
index 9fd8452be..30d3f7259 100644
--- a/tapdb/supply_commit.go
+++ b/tapdb/supply_commit.go
@@ -205,6 +205,11 @@ type SupplyCommitStore interface {
 	QueryStartingSupplyCommitment(ctx context.Context,
 		groupKey []byte) (sqlc.QueryStartingSupplyCommitmentRow, error)
 
+	// QueryLatestSupplyCommitment fetches the latest supply commitment
+	// of an asset group based on the highest block height.
+	QueryLatestSupplyCommitment(ctx context.Context,
+		groupKey []byte) (sqlc.QueryLatestSupplyCommitmentRow, error)
+
 	// QuerySupplyCommitmentOutpoint fetches the outpoint of a supply
 	// commitment by its ID.
 	QuerySupplyCommitmentOutpoint(ctx context.Context,

From adb6513d1b1e63770b17c49e55e911809e0c3b51 Mon Sep 17 00:00:00 2001
From: ffranr
Date: Mon, 1 Sep 2025 11:35:40 +0100
Subject: [PATCH 11/23] tapdb: add method FetchLatestCommitment to SupplyCommitMachine db store

---
 tapdb/supply_commit.go | 50 ++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/tapdb/supply_commit.go b/tapdb/supply_commit.go
index 30d3f7259..0cbe5bb74 100644
--- a/tapdb/supply_commit.go
+++ b/tapdb/supply_commit.go
@@ -1418,6 +1418,56 @@ func (s *SupplyCommitMachine) FetchStartingCommitment(ctx context.Context,
 	return commit, nil
 }
 
+// FetchLatestCommitment fetches the latest supply commitment of an asset
+// group based on the highest block height. If no commitment is found, it
+// returns ErrCommitmentNotFound.
+func (s *SupplyCommitMachine) FetchLatestCommitment(ctx context.Context,
+	assetSpec asset.Specifier) (*supplycommit.RootCommitment, error) {
+
+	groupKey := assetSpec.UnwrapGroupKeyToPtr()
+	if groupKey == nil {
+		return nil, ErrMissingGroupKey
+	}
+
+	var (
+		writeTx       = WriteTxOption()
+		groupKeyBytes = groupKey.SerializeCompressed()
+		commit        *supplycommit.RootCommitment
+	)
+	dbErr := s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error {
+		// First, fetch the supply commitment by group key.
+		commitRow, err := db.QueryLatestSupplyCommitment(
+			ctx, groupKeyBytes,
+		)
+		if err != nil {
+			return fmt.Errorf("failed to query latest "+
+				"commitment for group %x: %w", groupKeyBytes,
+				err)
+		}
+
+		commit, err = parseSupplyCommitmentRow(
+			ctx, commitRow.SupplyCommitment, commitRow.TxIndex, db,
+		)
+		if err != nil {
+			return fmt.Errorf("failed to parse latest "+
+				"commitment for group %x: %w", groupKeyBytes,
+				err)
+		}
+
+		return nil
+	})
+	if dbErr != nil {
+		if errors.Is(dbErr, sql.ErrNoRows) {
+			return nil, supplyverifier.ErrCommitmentNotFound
+		}
+
+		return nil, fmt.Errorf("failed to fetch latest commitment "+
+			"for group %x: %w", groupKeyBytes, dbErr)
+	}
+
+	return commit, nil
+}
+
 // parseSupplyCommitmentRow parses a SupplyCommitment row into a
 // supplycommit.RootCommitment and optional commitmentChainInfo.
 func parseSupplyCommitmentRow(ctx context.Context, commit SupplyCommitment,

From 9e298a960676bdf0814a4dbae7e3b85bbe7b5dd7 Mon Sep 17 00:00:00 2001
From: ffranr
Date: Fri, 29 Aug 2025 17:17:10 +0100
Subject: [PATCH 12/23] supplyverifier: init state machines for asset groups with commitments

Query the local database for asset groups with supply commitment
enabled where the node cannot publish commitments (no delegation key).
These are commitments published by external issuers. Commitments from
the current node are excluded, as they do not need to be tracked by the
verifier.

This step is skipped when running in universe server mode.
---
 universe/supplycommit/env.go       |  5 +++
 universe/supplycommit/mock.go      | 10 +++++
 universe/supplyverifier/manager.go | 71 ++++++++++++++++++++++++++++++
 3 files changed, 86 insertions(+)

diff --git a/universe/supplycommit/env.go b/universe/supplycommit/env.go
index 3b3354e41..be5a59442 100644
--- a/universe/supplycommit/env.go
+++ b/universe/supplycommit/env.go
@@ -232,6 +232,11 @@ func NewSupplyLeavesFromEvents(events []SupplyUpdateEvent) (SupplyLeaves,
 // AssetLookup is an interface that allows us to query for asset
 // information, such as asset groups and asset metadata.
 type AssetLookup interface {
+	// FetchSupplyCommitAssets fetches all assets with non-nil group keys
+	// that have supply commitments enabled.
+	FetchSupplyCommitAssets(ctx context.Context,
+		localControlled bool) ([]btcec.PublicKey, error)
+
 	// QueryAssetGroupByID attempts to fetch an asset group by its asset ID.
 	// If the asset group cannot be found, then ErrAssetGroupUnknown is
 	// returned.
diff --git a/universe/supplycommit/mock.go b/universe/supplycommit/mock.go
index 974ff2cf0..10eefe513 100644
--- a/universe/supplycommit/mock.go
+++ b/universe/supplycommit/mock.go
@@ -437,6 +437,16 @@ type mockAssetLookup struct {
 	mock.Mock
 }
 
+func (m *mockAssetLookup) FetchSupplyCommitAssets(ctx context.Context,
+	localControlled bool) ([]btcec.PublicKey, error) {
+
+	args := m.Called(ctx, localControlled)
+	if args.Get(0) == nil {
+		return nil, args.Error(1)
+	}
+	return args.Get(0).([]btcec.PublicKey), args.Error(1)
+}
+
 func (m *mockAssetLookup) QueryAssetGroupByID(ctx context.Context,
 	assetID asset.ID) (*asset.AssetGroup, error) {
 
diff --git a/universe/supplyverifier/manager.go b/universe/supplyverifier/manager.go
index fd9a5bac6..2c02e3277 100644
--- a/universe/supplyverifier/manager.go
+++ b/universe/supplyverifier/manager.go
@@ -78,6 +78,10 @@ type ManagerCfg struct {
 	// SupplyTreeView is used to fetch supply leaves by height.
 	SupplyTreeView SupplyTreeView
 
+	// SupplySyncer is used to retrieve supply leaves from a universe and
+	// persist them to the local database.
+	SupplySyncer SupplySyncer
+
 	// GroupFetcher is used to fetch asset group information.
 	GroupFetcher tapgarden.GroupFetcher
 
@@ -128,12 +132,71 @@ func NewManager(cfg ManagerCfg) *Manager {
 	}
 }
 
+// InitStateMachines initializes state machines for all asset groups that
+// support supply commitments. If a state machine for an asset group already
+// exists, it will be skipped.
+func (m *Manager) InitStateMachines() error {
+	ctx, cancel := m.WithCtxQuitNoTimeout()
+	defer cancel()
+
+	// First, get all assets with group keys that could potentially be
+	// involved in supply commitments. The Manager will filter these
+	// based on delegation key ownership and other criteria.
+	assetGroupKeys, err := m.cfg.AssetLookup.FetchSupplyCommitAssets(
+		ctx, false,
+	)
+	if err != nil {
+		return fmt.Errorf("unable to fetch supply commit assets: %w",
+			err)
+	}
+
+	for idx := range assetGroupKeys {
+		groupKey := assetGroupKeys[idx]
+
+		// Create asset specifier from group key.
+		assetSpec := asset.NewSpecifierFromGroupKey(groupKey)
+
+		// Check to ensure a state machine for the asset group does not
+		// already exist.
+		_, ok := m.smCache.Get(groupKey)
+		if ok {
+			continue
+		}
+
+		// Create and start a new state machine for the asset group.
+		newSm, err := m.startAssetSM(ctx, assetSpec)
+		if err != nil {
+			return fmt.Errorf("unable to start state machine for "+
+				"asset group (asset=%s): %w",
+				assetSpec.String(), err)
+		}
+
+		m.smCache.Set(groupKey, newSm)
+	}
+
+	return nil
+}
+
 // Start starts the multi state machine manager.
 func (m *Manager) Start() error {
+	var startErr error
+
 	m.startOnce.Do(func() {
 		// Initialize the state machine cache.
 		m.smCache = newStateMachineCache()
+
+		// Initialize state machines for each asset group that supports
+		// supply commitments.
+		err := m.InitStateMachines()
+		if err != nil {
+			startErr = fmt.Errorf("unable to initialize "+
+				"state machines: %w", err)
+			return
+		}
 	})
+	if startErr != nil {
+		return fmt.Errorf("unable to start manager: %w", startErr)
+	}
 
 	return nil
 }
@@ -645,6 +708,14 @@ func (c *stateMachineCache) StopAll() {
 	}
 }
 
+// Count returns the number of state machines in the cache.
+func (c *stateMachineCache) Count() int {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	return len(c.cache)
+}
+
 // Get retrieves a state machine from the cache.
 func (c *stateMachineCache) Get(groupPubKey btcec.PublicKey) (*StateMachine,
 	bool) {

From fbf60f1b940a79842b3ebbed36a188735f2de71b Mon Sep 17 00:00:00 2001
From: ffranr
Date: Tue, 9 Sep 2025 16:38:40 +0100
Subject: [PATCH 13/23] supplycommit: refactor asset check for reuse in supplyverifier package

Extract CheckSupplyCommitSupport function for use in the supplyverifier
package. Rename fetchLatestAssetMetadata to FetchLatestAssetMetadata to
allow external usage.
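With the check exported, callers outside the package can gate on it
directly. A sketch of the intended call site (`lookup` stands in for
any AssetLookup implementation):

	err := supplycommit.CheckSupplyCommitSupport(
		ctx, lookup, assetSpec, true,
	)
	if err != nil {
		return fmt.Errorf("supply commitments unsupported: %w", err)
	}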
---
 universe/supplycommit/manager.go | 61 +-------------------------
 universe/supplycommit/util.go    | 74 ++++++++++++++++++++++++++++
 2 files changed, 75 insertions(+), 60 deletions(-)

diff --git a/universe/supplycommit/manager.go b/universe/supplycommit/manager.go
index a81eafeb2..947d96de7 100644
--- a/universe/supplycommit/manager.go
+++ b/universe/supplycommit/manager.go
@@ -2,18 +2,15 @@ package supplycommit
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"sync"
 	"time"
 
 	"github.com/btcsuite/btcd/btcec/v2"
 	"github.com/btcsuite/btcd/chaincfg"
-	"github.com/lightninglabs/taproot-assets/address"
 	"github.com/lightninglabs/taproot-assets/asset"
 	"github.com/lightninglabs/taproot-assets/fn"
 	"github.com/lightninglabs/taproot-assets/mssmt"
-	"github.com/lightninglabs/taproot-assets/proof"
 	"github.com/lightninglabs/taproot-assets/tapgarden"
 	"github.com/lightninglabs/taproot-assets/universe"
 	"github.com/lightningnetwork/lnd/msgmux"
@@ -142,55 +139,6 @@ func (m *Manager) Stop() error {
 	return nil
 }
 
-// ensureSupplyCommitSupport verifies that the asset group for the given
-// asset specifier supports supply commitments, and that this node can generate
-// supply commitments for it.
-func (m *Manager) ensureSupplyCommitSupport(ctx context.Context,
-	metaReveal proof.MetaReveal) error {
-
-	// If the universe commitment flag is not set on the asset metadata,
-	// then the asset group does not support supply commitments.
-	if !metaReveal.UniverseCommitments {
-		return fmt.Errorf("asset group metadata universe " +
-			"commitments flag indicates that asset does not " +
-			"support supply commitments")
-	}
-
-	// If a delegation key is not present, then the asset group does not
-	// support supply commitments.
-	if metaReveal.DelegationKey.IsNone() {
-		return fmt.Errorf("asset group metadata does not " +
-			"specify delegation key, which is required for " +
-			"supply commitments")
-	}
-
-	// Extract supply commitment delegation pub key from the asset metadata.
-	delegationPubKey, err := metaReveal.DelegationKey.UnwrapOrErr(
-		fmt.Errorf("delegation key not found for given asset"),
-	)
-	if err != nil {
-		return err
-	}
-
-	// Fetch the delegation key locator. We need to ensure that the
-	// delegation key is owned by this node, so that we can generate
-	// supply commitments (ignore tuples) for this asset group.
-	_, err = m.cfg.AssetLookup.FetchInternalKeyLocator(
-		ctx, &delegationPubKey,
-	)
-	switch {
-	case errors.Is(err, address.ErrInternalKeyNotFound):
-		return fmt.Errorf("delegation key locator not found; "+
-			"only delegation key owners can ignore asset "+
-			"outpoints for this asset group: %w", err)
-	case err != nil:
-		return fmt.Errorf("failed to fetch delegation key locator: %w",
-			err)
-	}
-
-	return nil
-}
-
 // startAssetSM creates and starts a new supply commitment state
 // machine for the given asset specifier.
 func (m *Manager) startAssetSM(ctx context.Context,
@@ -292,14 +240,7 @@ func (m *Manager) fetchStateMachine(
 	ctx, cancel := m.WithCtxQuitNoTimeout()
 	defer cancel()
 
-	metaReveal, err := FetchLatestAssetMetadata(
-		ctx, m.cfg.AssetLookup, assetSpec,
-	)
-	if err != nil {
-		return nil, fmt.Errorf("failed to fetch asset meta: %w", err)
-	}
-
-	err = m.ensureSupplyCommitSupport(ctx, metaReveal)
+	err = CheckSupplyCommitSupport(ctx, m.cfg.AssetLookup, assetSpec, true)
 	if err != nil {
 		return nil, fmt.Errorf("failed to ensure supply commit "+
 			"support for asset: %w", err)
diff --git a/universe/supplycommit/util.go b/universe/supplycommit/util.go
index 51b16d6cf..dd371bbc8 100644
--- a/universe/supplycommit/util.go
+++ b/universe/supplycommit/util.go
@@ -2,11 +2,22 @@ package supplycommit
 
 import (
 	"context"
+	"errors"
 	"fmt"
 
+	"github.com/lightninglabs/taproot-assets/address"
+	"github.com/lightninglabs/taproot-assets/asset"
 	"github.com/lightningnetwork/lnd/fn/v2"
 )
 
+var (
+	// ErrSupplyNotSupported is returned when an operation that requires
+	// supply commitments is attempted on an asset that does not support
+	// them.
+	ErrSupplyNotSupported = errors.New("asset does not support supply " +
+		"commitments")
+)
+
 // CalcTotalOutstandingSupply calculates the total outstanding supply from the
 // given supply subtrees.
 func CalcTotalOutstandingSupply(ctx context.Context,
@@ -66,3 +77,66 @@
 	return fn.Ok[uint64](total)
 }
+
+// CheckSupplyCommitSupport verifies that the asset group for the given
+// asset specifier supports supply commitments, and that this node can generate
+// supply commitments for it.
+func CheckSupplyCommitSupport(ctx context.Context, assetLookup AssetLookup,
+	assetSpec asset.Specifier, locallyControlled bool) error {
+
+	// Fetch the latest asset metadata for the asset group.
+	metaReveal, err := FetchLatestAssetMetadata(
+		ctx, assetLookup, assetSpec,
+	)
+	if err != nil {
+		return fmt.Errorf("failed to fetch asset meta: %w", err)
+	}
+
+	// If the universe commitment flag is not set on the asset metadata,
+	// then the asset group does not support supply commitments.
+	if !metaReveal.UniverseCommitments {
+		return fmt.Errorf("asset group metadata universe "+
+			"commitments flag indicates unsupported supply "+
+			"commitments: %w", ErrSupplyNotSupported)
+	}
+
+	// If a delegation key is not present, then the asset group does not
+	// support supply commitments.
+	if metaReveal.DelegationKey.IsNone() {
+		return fmt.Errorf("asset group metadata does not "+
+			"specify delegation key, required for supply "+
+			"commitments: %w", ErrSupplyNotSupported)
+	}
+
+	// Extract supply commitment delegation pub key from the asset metadata.
+	delegationPubKey, err := metaReveal.DelegationKey.UnwrapOrErr(
+		fmt.Errorf("delegation key not found for given asset: %w",
+			ErrSupplyNotSupported),
+	)
+	if err != nil {
+		return err
+	}
+
+	// Fetch the delegation key locator. We need to ensure that the
+	// delegation key is owned by this node, so that we can generate
+	// supply commitments (ignore tuples) for this asset group.
+	_, err = assetLookup.FetchInternalKeyLocator(
+		ctx, &delegationPubKey,
+	)
+	switch {
+	case errors.Is(err, address.ErrInternalKeyNotFound):
+		// If local key control is expected, then we return an error
+		// if the delegation key locator is not found.
+		if locallyControlled {
+			return fmt.Errorf("delegation key locator not found; "+
+				"only delegation key owners can generate "+
+				"supply commitments: %w", err)
+		}
+
+	case err != nil:
+		return fmt.Errorf("failed to fetch delegation key locator: %w",
+			err)
+	}
+
+	return nil
+}

From c4b21fd01f94f517e36e7075228ad3814baa2648 Mon Sep 17 00:00:00 2001
From: ffranr
Date: Mon, 1 Sep 2025 17:12:58 +0100
Subject: [PATCH 14/23] supplyverifier: validate asset group before starting state machine

Verify that the asset group is supported before starting a
supplyverifier state machine for it.
---
 universe/supplyverifier/manager.go | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/universe/supplyverifier/manager.go b/universe/supplyverifier/manager.go
index 2c02e3277..4eabb0438 100644
--- a/universe/supplyverifier/manager.go
+++ b/universe/supplyverifier/manager.go
@@ -295,11 +295,18 @@ func (m *Manager) fetchStateMachine(assetSpec asset.Specifier) (*StateMachine,
 	ctx, cancel := m.WithCtxQuitNoTimeout()
 	defer cancel()
 
-	// TODO(ffranr): Check that the asset group supports supply commitments
-	// and that this node does not create supply commitments for the asset
-	// group (i.e. it does not own the delegation key). We don't want to
-	// run a verifier state machine for an asset group supply commitment
-	// that we issue ourselves.
+	// Check that the asset group supports supply commitments and that
+	// this node does not create supply commitments for the asset group
+	// (i.e. it does not own the delegation key). We don't want to run
+	// a verifier state machine for an asset group supply commitment
+	// that we issue ourselves.
+	err = supplycommit.CheckSupplyCommitSupport(
+		ctx, m.cfg.AssetLookup, assetSpec, false,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("asset group is not suitable for "+
+			"supply verifier state machine: %w", err)
+	}
 
 	newSm, err := m.startAssetSM(ctx, assetSpec)
 	if err != nil {

From 1703846c54995c952ec6bd1a7af4c5370d93fbb3 Mon Sep 17 00:00:00 2001
From: ffranr
Date: Mon, 1 Sep 2025 17:54:36 +0100
Subject: [PATCH 15/23] supplyverifier: remove state persistence; state can be regenerated

Remove database read/write for state machine state, as recovery on
reboot doesn't require stored state. The state can be fully regenerated
on startup, eliminating the need for persistence and simplifying
recovery/restart logic.
---
 universe/supplyverifier/manager.go | 31 +++---------------------------
 1 file changed, 3 insertions(+), 28 deletions(-)

diff --git a/universe/supplyverifier/manager.go b/universe/supplyverifier/manager.go
index 4eabb0438..02edf8cde 100644
--- a/universe/supplyverifier/manager.go
+++ b/universe/supplyverifier/manager.go
@@ -35,19 +35,6 @@ type DaemonAdapters interface {
 	Stop() error
 }
 
-// StateMachineStore is an interface that allows the state machine to persist
-// its state across restarts. This is used to track the state of the state
-// machine for supply verification.
-type StateMachineStore interface {
-	// CommitState is used to commit the state of the state machine to disk.
-	CommitState(context.Context, asset.Specifier, State) error
-
-	// FetchState attempts to fetch the state of the state machine for the
-	// target asset specifier. If the state machine doesn't exist, then a
-	// default state will be returned.
-	FetchState(context.Context, asset.Specifier) (State, error)
-}
-
 // IssuanceSubscriptions allows verifier state machines to subscribe to
 // asset group issuance events.
 type IssuanceSubscriptions interface {
@@ -93,11 +80,6 @@ type ManagerCfg struct {
 	// interact with external daemons whilst processing internal events.
 	DaemonAdapters DaemonAdapters
 
-	// StateLog is the main state log that is used to track the state of the
-	// state machine. This is used to persist the state of the state machine
-	// across restarts.
-	StateLog StateMachineStore
-
 	// ErrChan is the channel that is used to send errors to the caller.
 	ErrChan chan<- error
 }
@@ -230,21 +212,14 @@ func (m *Manager) startAssetSM(ctx context.Context,
 		QuitChan: m.Quit,
 	}
 
-	// Before we start the state machine, we'll need to fetch the current
-	// state from disk, to see if we need to emit any new events.
-	initialState, err := m.cfg.StateLog.FetchState(ctx, assetSpec)
-	if err != nil {
-		return nil, fmt.Errorf("unable to fetch current state: %w", err)
-	}
-
 	// Create a new error reporter for the state machine.
 	errorReporter := NewErrorReporter(assetSpec)
 
 	fsmCfg := protofsm.StateMachineCfg[Event, *Environment]{
 		ErrorReporter: &errorReporter,
-		InitialState:  initialState,
-		Env:           env,
-		Daemon:        m.cfg.DaemonAdapters,
+		// TODO(ffranr): Set InitialState here.
+		Env:    env,
+		Daemon: m.cfg.DaemonAdapters,
 	}
 
 	newSm := protofsm.NewStateMachine[Event, *Environment](fsmCfg)

From 98e26976d1fef372bffe5d748b84e14492fd1f66 Mon Sep 17 00:00:00 2001
From: ffranr
Date: Wed, 10 Sep 2025 09:51:16 +0100
Subject: [PATCH 16/23] supplyverifier+tapcfg: add supply verifier manager config validation

---
 tapcfg/server.go                   |  6 +++-
 universe/supplyverifier/manager.go | 45 ++++++++++++++++++++++++++++--
 2 files changed, 48 insertions(+), 3 deletions(-)

diff --git a/tapcfg/server.go b/tapcfg/server.go
index a94b43711..5efb4727a 100644
--- a/tapcfg/server.go
+++ b/tapcfg/server.go
@@ -549,7 +549,7 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger,
 
 	// Set up the supply verifier, which validates supply commitment leaves
 	// published by asset issuers.
-	supplyVerifyManager := supplyverifier.NewManager(
+	supplyVerifyManager, err := supplyverifier.NewManager(
 		supplyverifier.ManagerCfg{
 			Chain:       chainBridge,
 			AssetLookup: tapdbAddrBook,
@@ -561,6 +561,10 @@
 			DaemonAdapters: lndFsmDaemonAdapters,
 		},
 	)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create supply verifier: %w",
+			err)
+	}
 
 	// For the porter, we'll make a multi-notifier comprised of all the
 	// possible proof file sources to ensure it can always fetch input
diff --git a/universe/supplyverifier/manager.go b/universe/supplyverifier/manager.go
index 02edf8cde..cc29d17cb 100644
--- a/universe/supplyverifier/manager.go
+++ b/universe/supplyverifier/manager.go
@@ -84,6 +84,43 @@ type ManagerCfg struct {
 	ErrChan chan<- error
 }
 
+// Validate validates the ManagerCfg.
+func (m *ManagerCfg) Validate() error {
+	if m.Chain == nil {
+		return fmt.Errorf("chain is required")
+	}
+
+	if m.AssetLookup == nil {
+		return fmt.Errorf("asset lookup is required")
+	}
+
+	if m.Lnd == nil {
+		return fmt.Errorf("lnd is required")
+	}
+
+	if m.SupplyCommitView == nil {
+		return fmt.Errorf("supply commit view is required")
+	}
+
+	if m.SupplyTreeView == nil {
+		return fmt.Errorf("supply tree view is required")
+	}
+
+	if m.GroupFetcher == nil {
+		return fmt.Errorf("group fetcher is required")
+	}
+
+	if m.IssuanceSubscriptions == nil {
+		return fmt.Errorf("issuance subscriptions is required")
+	}
+
+	if m.DaemonAdapters == nil {
+		return fmt.Errorf("daemon adapters is required")
+	}
+
+	return nil
+}
+
 // Manager is a manager for multiple supply verifier state machines, one for
 // each asset group. It is responsible for starting and stopping the state
 // machines, as well as forwarding events to them.
@@ -104,14 +141,18 @@ type Manager struct {
 }
 
 // NewManager creates a new multi state machine manager.
-func NewManager(cfg ManagerCfg) *Manager {
+func NewManager(cfg ManagerCfg) (*Manager, error) {
+	if err := cfg.Validate(); err != nil {
+		return nil, fmt.Errorf("invalid config: %w", err)
+	}
+
 	return &Manager{
 		cfg: cfg,
 		ContextGuard: &fn.ContextGuard{
 			DefaultTimeout: DefaultTimeout,
 			Quit:           make(chan struct{}),
 		},
-	}
+	}, nil
 }

From ad3e985e2964ec23ad031b39c151ec16916d40ef Mon Sep 17 00:00:00 2001
From: ffranr
Date: Tue, 9 Sep 2025 23:03:07 +0100
Subject: [PATCH 17/23] supplyverifier: monitor universe syncer events to start state machines

Begin monitoring universe syncer issuance events and start
supplyverifier state machines for asset groups that support supply
commitments and do not already have a running state machine.

This allows a peer node to initiate supply verification and on-chain
UTXO watching after syncing a new asset group issuance.
---
 universe/supplycommit/util.go      |  23 ++++++
 universe/supplyverifier/manager.go | 128 +++++++++++++++++++++++++++++
 2 files changed, 151 insertions(+)

diff --git a/universe/supplycommit/util.go b/universe/supplycommit/util.go
index dd371bbc8..4e713f534 100644
--- a/universe/supplycommit/util.go
+++ b/universe/supplycommit/util.go
@@ -140,3 +140,26 @@
 	return nil
 }
+
+// IsSupplySupported checks whether the asset group for the given asset
+// specifier supports supply commitments. If locallyControlled is true,
+// then we also check that this node can generate supply commitments for it.
+//
+// NOTE: This is a convenience wrapper around CheckSupplyCommitSupport.
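+//
+// A typical call site (illustrative only, with an assumed AssetLookup
+// value named lookup) gates verifier startup on the result:
+//
+//	supported, err := IsSupplySupported(ctx, lookup, assetSpec, false)
+//	if err != nil || !supported {
+//		return
+//	}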
+func IsSupplySupported(ctx context.Context, assetLookup AssetLookup,
+	assetSpec asset.Specifier, locallyControlled bool) (bool, error) {
+
+	err := CheckSupplyCommitSupport(
+		ctx, assetLookup, assetSpec, locallyControlled,
+	)
+	switch {
+	case errors.Is(err, ErrSupplyNotSupported):
+		return false, nil
+
+	case err != nil:
+		return false, fmt.Errorf("failed to check asset for supply "+
+			"support: %w", err)
+	}
+
+	return true, nil
+}
diff --git a/universe/supplyverifier/manager.go b/universe/supplyverifier/manager.go
index cc29d17cb..4492a63a6 100644
--- a/universe/supplyverifier/manager.go
+++ b/universe/supplyverifier/manager.go
@@ -13,6 +13,7 @@ import (
 	"github.com/lightninglabs/taproot-assets/fn"
 	"github.com/lightninglabs/taproot-assets/mssmt"
 	"github.com/lightninglabs/taproot-assets/tapgarden"
+	"github.com/lightninglabs/taproot-assets/universe"
 	"github.com/lightninglabs/taproot-assets/universe/supplycommit"
 	"github.com/lightningnetwork/lnd/msgmux"
 	"github.com/lightningnetwork/lnd/protofsm"
@@ -42,6 +43,10 @@ type IssuanceSubscriptions interface {
 	// issuance events.
 	RegisterSubscriber(receiver *fn.EventReceiver[fn.Event],
 		deliverExisting bool, _ bool) error
+
+	// RemoveSubscriber removes the given subscriber and also stops it from
+	// processing events.
+	RemoveSubscriber(subscriber *fn.EventReceiver[fn.Event]) error
 }
 
 // ManagerCfg is the configuration for the
@@ -216,6 +221,14 @@ func (m *Manager) Start() error {
 				"state machines: %v", err)
 			return
 		}
+
+		// Start a goroutine to handle universe syncer issuance events.
+		m.ContextGuard.Goroutine(
+			m.MonitorUniSyncEvents, func(err error) {
+				log.Errorf("MonitorUniSyncEvents: %v",
+					err)
+			},
+		)
 	})
 	if startErr != nil {
 		return fmt.Errorf("unable to start manager: %w", startErr)
@@ -224,6 +237,121 @@ func (m *Manager) Start() error {
 	return nil
 }
 
+// handleUniSyncEvent handles a single universe syncer event. If the event is an
+// issuance event for an asset group that supports supply commitments, it will
+// ensure that a state machine for the asset group exists, creating and
+// starting it if necessary.
+func (m *Manager) handleUniSyncEvent(event fn.Event) error {
+	// Disregard the event if it is not of type universe.SyncDiffEvent.
+	syncDiffEvent, ok := event.(*universe.SyncDiffEvent)
+	if !ok {
+		return nil
+	}
+
+	// If the sync diff is not a new issuance, we disregard it.
+	universeID := syncDiffEvent.SyncDiff.NewUniverseRoot.ID
+	if universeID.ProofType != universe.ProofTypeIssuance {
+		return nil
+	}
+
+	// If the asset is not a group key asset, we disregard it.
+	if universeID.GroupKey == nil {
+		return nil
+	}
+
+	// If there are no new leaf proofs, we disregard the sync event.
+	if len(syncDiffEvent.SyncDiff.NewLeafProofs) == 0 {
+		return nil
+	}
+
+	// Get the genesis asset ID from the first synced leaf and formulate an
+	// asset specifier.
+	//
+	// TODO(ffranr): Revisit this. We select any asset ID to aid in metadata
+	// retrieval, but we should be able to do this with just the group key.
+	// However, QueryAssetGroupByGroupKey currently fails for the asset
+	// group.
+	assetID := syncDiffEvent.SyncDiff.NewLeafProofs[0].Genesis.ID()
+
+	assetSpec := asset.NewSpecifierOptionalGroupPubKey(
+		assetID, universeID.GroupKey,
+	)
+
+	// Check that the asset group supports supply commitments.
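+	//
+	// Note: a group without supply commitment support is skipped silently
+	// (IsSupplySupported returns false with a nil error); only a failed
+	// lookup surfaces as an error here.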
+	ctx, cancelCtx := m.WithCtxQuitNoTimeout()
+	isSupported, err := supplycommit.IsSupplySupported(
+		ctx, m.cfg.AssetLookup, assetSpec, false,
+	)
+	if err != nil {
+		return fmt.Errorf("failed to check supply support: %w", err)
+	}
+	cancelCtx()
+
+	if !isSupported {
+		return nil
+	}
+
+	// Fetch the state machine for the asset group, creating and starting
+	// it if it doesn't exist.
+	log.Debugf("Ensuring supply verifier state machine for asset "+
+		"group due to universe syncer issuance event (asset=%s)",
+		assetSpec.String())
+	_, err = m.fetchStateMachine(assetSpec)
+	if err != nil {
+		return fmt.Errorf("unable to get or create state machine: %w",
+			err)
+	}
+
+	return nil
+}
+
+// MonitorUniSyncEvents registers an event receiver to receive universe
+// syncer issuance events.
+//
+// NOTE: This method must be run as a goroutine.
+func (m *Manager) MonitorUniSyncEvents() error {
+	// Register an event receiver to receive universe syncer events. These
+	// events relate to asset issuance proofs.
+	eventReceiver := fn.NewEventReceiver[fn.Event](
+		fn.DefaultQueueSize,
+	)
+	err := m.cfg.IssuanceSubscriptions.RegisterSubscriber(
+		eventReceiver, false, true,
+	)
+	if err != nil {
+		return fmt.Errorf("unable to register universe syncer "+
+			"issuance event subscriber: %w", err)
+	}
+
+	// Ensure we remove the subscriber when we exit.
+	defer func() {
+		err := m.cfg.IssuanceSubscriptions.RemoveSubscriber(
+			eventReceiver,
+		)
+		if err != nil {
+			log.Errorf("unable to remove universe syncer "+
+				"issuance event subscriber: %v", err)
+		}
+	}()
+
+	for {
+		select {
+		case <-m.Quit:
+			return nil
+
+		case event := <-eventReceiver.NewItemCreated.ChanOut():
+			err := m.handleUniSyncEvent(event)
+			if err != nil {
+				return fmt.Errorf("unable to handle "+
+					"universe issuance sync event: %w", err)
+			}
+		}
+	}
+}
+
 // Stop stops the multi state machine manager, which in turn stops all asset
 // group key specific supply verifier state machines.
 func (m *Manager) Stop() error {

From fb3db9138a1a928a39b47f4fb56ae788ea7ba5ea Mon Sep 17 00:00:00 2001
From: ffranr
Date: Wed, 10 Sep 2025 14:40:13 +0100
Subject: [PATCH 18/23] supplycommit: use provided asset ID in metadata lookup
 if set

If an asset ID is supplied, use it when fetching asset metadata in
FetchLatestAssetMetadata.
---
 universe/supplycommit/env.go                | 18 ++++++++++++++++++
 universe/supplycommit/state_machine_test.go |  9 +++++++--
 2 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/universe/supplycommit/env.go b/universe/supplycommit/env.go
index be5a59442..d0ecbfdd0 100644
--- a/universe/supplycommit/env.go
+++ b/universe/supplycommit/env.go
@@ -268,6 +268,24 @@ func FetchLatestAssetMetadata(ctx context.Context, lookup AssetLookup,
 
 	var zero proof.MetaReveal
 
+	// If the asset specifier has an asset ID, then we'll use that to
+	// fetch the asset metadata.
+	if assetSpec.HasId() {
+		assetID, err := assetSpec.UnwrapIdOrErr()
+		if err != nil {
+			return zero, err
+		}
+
+		metaReveal, err := lookup.FetchAssetMetaForAsset(ctx, assetID)
+		if err != nil {
+			return zero, fmt.Errorf("failed to fetch asset meta: %w",
+				err)
+		}
+
+		return *metaReveal, nil
+	}
+
+	// Otherwise, we'll need to fetch the asset group using the group key.
groupKey, err := assetSpec.UnwrapGroupKeyOrErr() if err != nil { return zero, err diff --git a/universe/supplycommit/state_machine_test.go b/universe/supplycommit/state_machine_test.go index b3a62f2a5..9661dfc0f 100644 --- a/universe/supplycommit/state_machine_test.go +++ b/universe/supplycommit/state_machine_test.go @@ -1339,11 +1339,16 @@ func TestSupplyCommitFinalizeStateTransitions(t *testing.T) { }, assetSpec: assetIDSpec, }) + + h.expectAssetLookup() + h.expectSupplySyncer() + h.expectApplyStateTransition() + h.start() defer h.stopAndAssert() - expectedErr := errors.New("unable to fetch latest asset " + - "metadata: unable to unwrap asset group public key") + expectedErr := errors.New("group key must be specified for " + + "supply tree: unable to unwrap asset group public key") h.expectFailure(expectedErr) finalizeEvent := &FinalizeEvent{} From d0e9b79cbdd3568683f09e7df53542ef2ee885df Mon Sep 17 00:00:00 2001 From: ffranr Date: Thu, 11 Sep 2025 12:44:17 +0100 Subject: [PATCH 19/23] supplycommit: reduce log level of tx/commit output to trace/debug Lower the log level of transaction and commit-related output from info to trace/debug to reduce noise and improve log file readability. This level of detail is no longer needed at the info level. --- universe/supplycommit/transitions.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/universe/supplycommit/transitions.go b/universe/supplycommit/transitions.go index 9f80837a7..b70fb66b2 100644 --- a/universe/supplycommit/transitions.go +++ b/universe/supplycommit/transitions.go @@ -452,7 +452,7 @@ func newRootCommitment(ctx context.Context, ) (*RootCommitment, *psbt.Packet, error) { logger.WhenSome(func(l btclog.Logger) { - l.Infof("Creating new root commitment, spending %v "+ + l.Debugf("Creating new root commitment, spending %v "+ "pre-commits", len(unspentPreCommits)) }) @@ -495,7 +495,7 @@ func newRootCommitment(ctx context.Context, var spentCommitOp fn.Option[wire.OutPoint] oldCommitment.WhenSome(func(r RootCommitment) { logger.WhenSome(func(l btclog.Logger) { - l.Infof("Re-using prior commitment as outpoint=%v: %v", + l.Tracef("Re-using prior commitment as outpoint=%v: %v", r.CommitPoint(), limitSpewer.Sdump(r)) }) @@ -603,7 +603,7 @@ func newRootCommitment(ctx context.Context, } logger.WhenSome(func(l btclog.Logger) { - l.Infof("Created new root commitment: %v", + l.Tracef("Created new root commitment: %v", limitSpewer.Sdump(newSupplyCommit)) }) @@ -836,7 +836,7 @@ func (s *CommitTxSignState) ProcessEvent(event Event, "commitment tx: %w", err) } - prefixedLog.Infof("Signed supply "+ + prefixedLog.Tracef("Signed supply "+ "commitment txn: %v", limitSpewer.Sdump(signedPsbt)) err = psbt.MaybeFinalizeAll(signedPsbt) @@ -931,7 +931,7 @@ func (c *CommitBroadcastState) ProcessEvent(event Event, } commitTxid := c.SupplyTransition.NewCommitment.Txn.TxHash() - prefixedLog.Infof("Broadcasting supply commitment "+ + prefixedLog.Tracef("Broadcasting supply commitment "+ "txn (txid=%v): %v", commitTxid, limitSpewer.Sdump(c.SupplyTransition.NewCommitment.Txn)) @@ -1022,7 +1022,7 @@ func (c *CommitBroadcastState) ProcessEvent(event Event, TxIndex: newEvent.TxIndex, }) - prefixedLog.Infof("Supply commitment txn confirmed "+ + prefixedLog.Tracef("Supply commitment txn confirmed "+ "in block %d (hash=%v): %v", newEvent.BlockHeight, newEvent.Block.Header.BlockHash(), limitSpewer.Sdump(c.SupplyTransition.NewCommitment.Txn)) From 9cc70197dde1d6a3455f2acaa34d8788bf9a38e7 Mon Sep 17 00:00:00 2001 From: ffranr Date: Thu, 11 Sep 
2025 14:56:23 +0100 Subject: [PATCH 20/23] supplycommit: avoid mock variable name shadowing with outer scope --- universe/supplycommit/state_machine_test.go | 66 ++++++++++----------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/universe/supplycommit/state_machine_test.go b/universe/supplycommit/state_machine_test.go index 9661dfc0f..8f847fa43 100644 --- a/universe/supplycommit/state_machine_test.go +++ b/universe/supplycommit/state_machine_test.go @@ -138,37 +138,37 @@ type supplyCommitTestHarness struct { func newSupplyCommitTestHarness(t *testing.T, cfg *harnessCfg) *supplyCommitTestHarness { - mockTreeView := &mockSupplyTreeView{} - mockCommits := &mockCommitmentTracker{} - mockWallet := &mockWallet{} - mockKey := &mockKeyRing{} - mockChain := &mockChainBridge{} - mockStateLog := &mockStateMachineStore{} - mockDaemon := newMockDaemonAdapters() - mockErrReporter := &mockErrorReporter{} - mockCache := &mockIgnoreCheckerCache{} - mockAssetLookup := &mockAssetLookup{} - mockSupplySyncer := &mockSupplySyncer{} + mTreeView := &mockSupplyTreeView{} + mCommits := &mockCommitmentTracker{} + mWallet := &mockWallet{} + mKey := &mockKeyRing{} + mChain := &mockChainBridge{} + mStateLog := &mockStateMachineStore{} + mDaemon := newMockDaemonAdapters() + mErrReporter := &mockErrorReporter{} + mCache := &mockIgnoreCheckerCache{} + mAssetLookup := &mockAssetLookup{} + mSupplySyncer := &mockSupplySyncer{} env := &Environment{ AssetSpec: cfg.assetSpec, - TreeView: mockTreeView, - Commitments: mockCommits, - Wallet: mockWallet, - KeyRing: mockKey, - Chain: mockChain, - StateLog: mockStateLog, - AssetLookup: mockAssetLookup, - SupplySyncer: mockSupplySyncer, + TreeView: mTreeView, + Commitments: mCommits, + Wallet: mWallet, + KeyRing: mKey, + Chain: mChain, + StateLog: mStateLog, + AssetLookup: mAssetLookup, + SupplySyncer: mSupplySyncer, CommitConfTarget: DefaultCommitConfTarget, - IgnoreCheckerCache: mockCache, + IgnoreCheckerCache: mCache, } fsmCfg := Config{ InitialState: cfg.initialState, Env: env, - Daemon: mockDaemon, - ErrorReporter: mockErrReporter, + Daemon: mDaemon, + ErrorReporter: mErrReporter, InitEvent: lfn.None[protofsm.DaemonEvent](), MsgMapper: lfn.None[protofsm.MsgMapper[Event]](), CustomPollInterval: lfn.Some(time.Second), @@ -181,17 +181,17 @@ func newSupplyCommitTestHarness(t *testing.T, cfg: cfg, stateMachine: &stateMachine, env: env, - mockTreeView: mockTreeView, - mockCommits: mockCommits, - mockWallet: mockWallet, - mockKeyRing: mockKey, - mockChain: mockChain, - mockStateLog: mockStateLog, - mockCache: mockCache, - mockDaemon: mockDaemon, - mockErrReporter: mockErrReporter, - mockAssetLookup: mockAssetLookup, - mockSupplySyncer: mockSupplySyncer, + mockTreeView: mTreeView, + mockCommits: mCommits, + mockWallet: mWallet, + mockKeyRing: mKey, + mockChain: mChain, + mockStateLog: mStateLog, + mockCache: mCache, + mockDaemon: mDaemon, + mockErrReporter: mErrReporter, + mockAssetLookup: mAssetLookup, + mockSupplySyncer: mSupplySyncer, } h.stateSub = stateMachine.RegisterStateEvents() From 056591e7c83407c32d5f685ce6c2090d7d65fed9 Mon Sep 17 00:00:00 2001 From: ffranr Date: Thu, 11 Sep 2025 15:16:37 +0100 Subject: [PATCH 21/23] supplyverifier: rewrite supply verifier state machine Significant rewrite of state machine events, states, and transitions. Replaces the placeholder state machine. 
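
In rough outline, the rewritten machine cycles between watching outputs
and syncing. The sketch below is illustrative only: the state and event
names match this patch, but the kick-off call is an assumption (the
actual wiring lives in Manager.startAssetSM):

    // InitState loads unspent pre-commitments and the latest verified
    // commitment, then hands them to WatchOutputsState, which registers
    // on-chain spend watchers and parks in SyncVerifyState. A SpendEvent
    // (after an optional settling delay) yields a SyncVerifyEvent:
    // pull, verify, store, and back to watching.
    sm := protofsm.NewStateMachine[Event, *Environment](fsmCfg)
    sm.Start(ctx)
    sm.SendEvent(ctx, &InitEvent{})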
--- tapcfg/server.go | 1 + universe/supplyverifier/env.go | 46 ++- universe/supplyverifier/events.go | 80 ++++++ universe/supplyverifier/manager.go | 25 +- universe/supplyverifier/states.go | 74 +++-- universe/supplyverifier/transitions.go | 381 +++++++++++++++++++++++++ 6 files changed, 573 insertions(+), 34 deletions(-) create mode 100644 universe/supplyverifier/events.go create mode 100644 universe/supplyverifier/transitions.go diff --git a/tapcfg/server.go b/tapcfg/server.go index 5efb4727a..fcd66f49a 100644 --- a/tapcfg/server.go +++ b/tapcfg/server.go @@ -556,6 +556,7 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, Lnd: lndServices, SupplyCommitView: supplyCommitStore, SupplyTreeView: supplyTreeStore, + SupplySyncer: supplySyncer, GroupFetcher: assetMintingStore, IssuanceSubscriptions: universeSyncer, DaemonAdapters: lndFsmDaemonAdapters, diff --git a/universe/supplyverifier/env.go b/universe/supplyverifier/env.go index 12b816c78..a7f7002e9 100644 --- a/universe/supplyverifier/env.go +++ b/universe/supplyverifier/env.go @@ -3,8 +3,10 @@ package supplyverifier import ( "context" "fmt" + "time" "github.com/btcsuite/btcd/wire" + "github.com/lightninglabs/lndclient" "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/fn" "github.com/lightninglabs/taproot-assets/mssmt" @@ -33,10 +35,17 @@ type SupplyCommitView interface { assetSpec asset.Specifier, localIssuerOnly bool) lfn.Result[supplycommit.PreCommits] - // SupplyCommit returns the latest supply commitment for a given asset - // spec. - SupplyCommit(ctx context.Context, - assetSpec asset.Specifier) supplycommit.RootCommitResp + // FetchStartingCommitment fetches the very first supply commitment of + // an asset group. If no commitment is found, it returns + // ErrCommitmentNotFound. + FetchStartingCommitment(ctx context.Context, + assetSpec asset.Specifier) (*supplycommit.RootCommitment, error) + + // FetchLatestCommitment fetches the latest supply commitment of an + // asset group. If no commitment is found, it returns + // ErrCommitmentNotFound. + FetchLatestCommitment(ctx context.Context, + assetSpec asset.Specifier) (*supplycommit.RootCommitment, error) // FetchCommitmentByOutpoint fetches a supply commitment by its outpoint // and group key. If no commitment is found, it returns @@ -53,12 +62,6 @@ type SupplyCommitView interface { spentOutpoint wire.OutPoint) (*supplycommit.RootCommitment, error) - // FetchStartingCommitment fetches the very first supply commitment of - // an asset group. If no commitment is found, it returns - // ErrCommitmentNotFound. - FetchStartingCommitment(ctx context.Context, - assetSpec asset.Specifier) (*supplycommit.RootCommitment, error) - // InsertSupplyCommit inserts a supply commitment into the database. InsertSupplyCommit(ctx context.Context, assetSpec asset.Specifier, commit supplycommit.RootCommitment, @@ -100,6 +103,29 @@ type Environment struct { // pre-commitments. SupplyCommitView SupplyCommitView + // SupplyTreeView is used to fetch supply leaves by height. + SupplyTreeView SupplyTreeView + + // AssetLookup is used to look up asset information such as asset groups + // and asset metadata. + AssetLookup supplycommit.AssetLookup + + // Lnd is a collection of useful LND clients. + Lnd *lndclient.LndServices + + // GroupFetcher is used to fetch asset groups. + GroupFetcher tapgarden.GroupFetcher + + // SupplySyncer is used to retrieve supply commitments from a universe + // server. 
+ SupplySyncer SupplySyncer + + // SpendSyncDelay is the wait time after detecting a spend before + // starting the sync of the corresponding supply commitment. The delay + // allows the peer node to submit the new commitment to the universe + // server and for it to be available for retrieval. + SpendSyncDelay time.Duration + // ErrChan is the channel that is used to send errors to the caller. ErrChan chan<- error diff --git a/universe/supplyverifier/events.go b/universe/supplyverifier/events.go new file mode 100644 index 000000000..b103e0497 --- /dev/null +++ b/universe/supplyverifier/events.go @@ -0,0 +1,80 @@ +package supplyverifier + +import ( + "time" + + "github.com/btcsuite/btcd/wire" + "github.com/lightninglabs/taproot-assets/fn" + "github.com/lightninglabs/taproot-assets/universe/supplycommit" + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/protofsm" +) + +// Event is a special interface used to create the equivalent of a sum-type, but +// using a "sealed" interface. +type Event interface { + eventSealed() +} + +// FsmEvent is a type alias for the event type of the supply verifier state +// machine. +type FsmEvent = protofsm.EmittedEvent[Event] + +// InitEvent is the first event that is sent to the state machine. +type InitEvent struct{} + +// eventSealed is a special method that is used to seal the interface. +func (i *InitEvent) eventSealed() {} + +// SyncVerifyEvent is sent to SyncVerifyState to prompt it to sync-verify +// starting from the given outpoint, or from scratch if no outpoint is given. +type SyncVerifyEvent struct { + // SpentCommitOutpoint is an optional outpoint that was spent which + // triggered the need to start syncing from the beginning. If this is + // None, then we will sync from the first supply commitment. + SpentCommitOutpoint fn.Option[wire.OutPoint] +} + +// eventSealed is a special method that is used to seal the interface. +func (e *SyncVerifyEvent) eventSealed() {} + +// WatchOutputsEvent is an event that carries the set of outputs to watch. +type WatchOutputsEvent struct { + // PreCommits is the set of all pre-commitments that should be watched + // for a spend. + PreCommits supplycommit.PreCommits + + // SupplyCommit is the latest known supply commitment that should be + // watched for a spend. + SupplyCommit *supplycommit.RootCommitment +} + +// eventSealed is a special method that is used to seal the interface. +func (e *WatchOutputsEvent) eventSealed() {} + +// SpendEvent is sent in response to an intent to be notified of a spend of an +// outpoint. +type SpendEvent struct { + // SpendDetail is the details of the spend that was observed on-chain. + SpendDetail *chainntnfs.SpendDetail + + // PreCommitments is the set of all pre-commitments that were being + // watched for a spend. + PreCommitments []supplycommit.PreCommitment + + // SpentPreCommitment is the pre-commitment that was spent. This will + // be non-nil only if the spent output was a pre-commitment. + SpentPreCommitment *supplycommit.PreCommitment + + // SpentSupplyCommitment is the supply commitment that was spent. This + // will be non-nil only if the spent output was a supply commitment. + SpentSupplyCommitment *supplycommit.RootCommitment + + // WatchStartTimestamp records when monitoring for this spend began. + // It is used to calculate the delay before syncing, giving the issuer + // time to publish the new supply commitment. + WatchStartTimestamp time.Time +} + +// eventSealed is a special method that is used to seal the interface. 
+func (s *SpendEvent) eventSealed() {} diff --git a/universe/supplyverifier/manager.go b/universe/supplyverifier/manager.go index 4492a63a6..0bdca7fde 100644 --- a/universe/supplyverifier/manager.go +++ b/universe/supplyverifier/manager.go @@ -22,6 +22,13 @@ import ( const ( // DefaultTimeout is the context guard default timeout. DefaultTimeout = 30 * time.Second + + // DefaultSpendSyncDelay is the default delay to wait after a spend + // notification is received before starting the sync of the + // corresponding supply commitment. The delay allows the peer node to + // submit the new commitment to the universe server and for it to be + // available for retrieval + DefaultSpendSyncDelay = 5 * time.Second ) // DaemonAdapters is a wrapper around the protofsm.DaemonAdapters interface @@ -372,11 +379,20 @@ func (m *Manager) Stop() error { func (m *Manager) startAssetSM(ctx context.Context, assetSpec asset.Specifier) (*StateMachine, error) { + log.Infof("Starting supply verifier state machine (asset=%s)", + assetSpec.String()) + // If the state machine is not found, create a new one. env := &Environment{ AssetSpec: assetSpec, Chain: m.cfg.Chain, SupplyCommitView: m.cfg.SupplyCommitView, + SupplyTreeView: m.cfg.SupplyTreeView, + AssetLookup: m.cfg.AssetLookup, + Lnd: m.cfg.Lnd, + GroupFetcher: m.cfg.GroupFetcher, + SupplySyncer: m.cfg.SupplySyncer, + SpendSyncDelay: DefaultSpendSyncDelay, ErrChan: m.cfg.ErrChan, QuitChan: m.Quit, } @@ -386,9 +402,9 @@ func (m *Manager) startAssetSM(ctx context.Context, fsmCfg := protofsm.StateMachineCfg[Event, *Environment]{ ErrorReporter: &errorReporter, - // TODO(ffranr): Set InitialState here. - Env: env, - Daemon: m.cfg.DaemonAdapters, + InitialState: &InitState{}, + Env: env, + Daemon: m.cfg.DaemonAdapters, } newSm := protofsm.NewStateMachine[Event, *Environment](fsmCfg) @@ -436,6 +452,9 @@ func (m *Manager) fetchStateMachine(assetSpec asset.Specifier) (*StateMachine, // the cache with a new running instance. } + log.Debugf("Creating new supply verifier state machine for "+ + "group: %x", groupKey.SerializeCompressed()) + ctx, cancel := m.WithCtxQuitNoTimeout() defer cancel() diff --git a/universe/supplyverifier/states.go b/universe/supplyverifier/states.go index 1bc5f04a1..e9017e339 100644 --- a/universe/supplyverifier/states.go +++ b/universe/supplyverifier/states.go @@ -12,17 +12,6 @@ var ( ErrInvalidStateTransition = fmt.Errorf("invalid state transition") ) -// Event is a special interface used to create the equivalent of a sum-type, but -// using a "sealed" interface. -type Event interface { - eventSealed() -} - -// Events is a special type constraint that enumerates all the possible protocol -// events. -type Events interface { -} - // StateTransition is the StateTransition type specific to the supply verifier // state machine. type StateTransition = protofsm.StateTransition[Event, *Environment] @@ -36,6 +25,59 @@ type State interface { String() string } +// InitState is the starting state of the machine. In this state we decide +// whether to start syncing immediately or wait for spends before syncing. +type InitState struct { +} + +// stateSealed is a special method that is used to seal the interface. +func (s *InitState) stateSealed() {} + +// IsTerminal returns true if the target state is a terminal state. +func (s *InitState) IsTerminal() bool { + return false +} + +// String returns the name of the state. 
+func (s *InitState) String() string { + return "InitState" +} + +// SyncVerifyState is the state where we sync proofs related to a +// supply commitment transaction. +type SyncVerifyState struct{} + +// stateSealed is a special method that is used to seal the interface. +func (s *SyncVerifyState) stateSealed() {} + +// IsTerminal returns true if the target state is a terminal state. +func (s *SyncVerifyState) IsTerminal() bool { + return false +} + +// String returns the name of the state. +func (s *SyncVerifyState) String() string { + return "SyncVerifyState" +} + +// WatchOutputsState waits for one of the watched outputs to be spent. +// If an output is already spent, we transition immediately. +// This state avoids wasted sync polling of universe servers. +type WatchOutputsState struct{} + +// stateSealed is a special method that is used to seal the interface. +func (s *WatchOutputsState) stateSealed() {} + +// IsTerminal returns true if the target state is a terminal state. +func (s *WatchOutputsState) IsTerminal() bool { + return false +} + +// String returns the name of the state. +func (s *WatchOutputsState) String() string { + return "WatchOutputsState" +} + // StateMachine is a state machine that handles verifying the on-chain supply // commitment for a given asset. type StateMachine = protofsm.StateMachine[Event, *Environment] @@ -47,16 +89,6 @@ type Config = protofsm.StateMachineCfg[Event, *Environment] // FsmState is a type alias for the state of the supply verifier state machine. type FsmState = protofsm.State[Event, *Environment] -// FsmEvent is a type alias for the event type of the supply verifier state -// machine. -type FsmEvent = protofsm.EmittedEvent[Event] - // StateSub is a type alias for the state subscriber of the supply verifier // state machine. type StateSub = protofsm.StateSubscriber[Event, *Environment] - -// InitEvent is the first event that is sent to the state machine. -type InitEvent struct{} - -// eventSealed is a special method that is used to seal the interface. -func (i *InitEvent) eventSealed() {} diff --git a/universe/supplyverifier/transitions.go b/universe/supplyverifier/transitions.go new file mode 100644 index 000000000..d3d5cdeb2 --- /dev/null +++ b/universe/supplyverifier/transitions.go @@ -0,0 +1,381 @@ +package supplyverifier + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" + + "github.com/btcsuite/btcd/wire" + "github.com/lightninglabs/taproot-assets/fn" + "github.com/lightninglabs/taproot-assets/universe/supplycommit" + "github.com/lightningnetwork/lnd/chainntnfs" + lfn "github.com/lightningnetwork/lnd/fn/v2" + "github.com/lightningnetwork/lnd/protofsm" +) + +// ProcessEvent handles the initial state transition for the supply verifier. +func (s *InitState) ProcessEvent(event Event, + env *Environment) (*StateTransition, error) { + + switch event.(type) { + case *InitEvent: + ctx := context.Background() + + // Retrieve the set of unspent pre-commitments for the asset + // group. We will need these later to watch their spends. + preCommits, err := env.SupplyCommitView.UnspentPrecommits( + ctx, env.AssetSpec, false, + ).Unpack() + if err != nil { + return nil, fmt.Errorf("unable to fetch unspent "+ + "pre-commitments: %w", err) + } + + // Query local db for the latest verified supply commitment. + latestCommit, err := env.SupplyCommitView.FetchLatestCommitment( + ctx, env.AssetSpec, + ) + switch { + case errors.Is(err, ErrCommitmentNotFound): + // Continue without the latest commitment. 
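+			// latestCommit simply stays nil; any unspent
+			// pre-commitments below are still enough to start
+			// watching for spends.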
+
+		case err != nil:
+			return nil, fmt.Errorf("unable to fetch latest "+
+				"verified commitment from db: %w", err)
+		}
+
+		// If at this point we don't have any pre-commitments or a
+		// verified supply commitment, then we'll have to raise an
+		// error. Something went wrong before this point.
+		if latestCommit == nil && len(preCommits) == 0 {
+			return nil, fmt.Errorf("no pre-commitments or " +
+				"verified supply commitment found")
+		}
+
+		return &StateTransition{
+			NextState: &WatchOutputsState{},
+			NewEvents: lfn.Some(FsmEvent{
+				InternalEvent: []Event{
+					&WatchOutputsEvent{
+						PreCommits:   preCommits,
+						SupplyCommit: latestCommit,
+					},
+				},
+			}),
+		}, nil
+
+	default:
+		return nil, fmt.Errorf("%w: received %T while in %T",
+			ErrInvalidStateTransition, event, s)
+	}
+}
+
+// maybeFetchSupplyCommit attempts to fetch a supply commitment by the
+// specified spent outpoint. If no commitment is found, then a None option is
+// returned.
+func maybeFetchSupplyCommit(ctx context.Context, env *Environment,
+	spentOutpoint wire.OutPoint) (fn.Option[supplycommit.RootCommitment],
+	error) {
+
+	var zero fn.Option[supplycommit.RootCommitment]
+
+	commit, err := env.SupplyCommitView.FetchCommitmentBySpentOutpoint(
+		ctx, env.AssetSpec, spentOutpoint,
+	)
+	switch {
+	case errors.Is(err, ErrCommitmentNotFound):
+		return zero, nil
+
+	case err != nil:
+		return zero, fmt.Errorf("unable to query db for commitment: %w",
+			err)
+	}
+
+	return fn.MaybeSome(commit), nil
+}
+
+// ProcessEvent handles state transitions for the SyncVerifyState.
+func (s *SyncVerifyState) ProcessEvent(event Event,
+	env *Environment) (*StateTransition, error) {
+
+	switch e := event.(type) {
+	case *SyncVerifyEvent:
+		ctx := context.Background()
+
+		// Check to ensure that we haven't already processed a supply
+		// commitment for the spent outpoint, if one was provided.
+		if e.SpentCommitOutpoint.IsSome() {
+			spentOutpoint, err := e.SpentCommitOutpoint.UnwrapOrErr(
+				fmt.Errorf("spent outpoint unexpectedly " +
+					"missing"),
+			)
+			if err != nil {
+				return nil, err
+			}
+
+			commitOpt, err := maybeFetchSupplyCommit(
+				ctx, env, spentOutpoint,
+			)
+			if err != nil {
+				return nil, err
+			}
+
+			// If we found a commitment, then we've already
+			// processed this supply commit, so we can
+			// transition to the watch state.
+			if commitOpt.IsSome() {
+				commit, err := commitOpt.UnwrapOrErr(
+					fmt.Errorf("commitment missing"),
+				)
+				if err != nil {
+					return nil, err
+				}
+
+				watchEvent := WatchOutputsEvent{
+					SupplyCommit: &commit,
+				}
+				return &StateTransition{
+					NextState: &WatchOutputsState{},
+					NewEvents: lfn.Some(FsmEvent{
+						InternalEvent: []Event{
+							&watchEvent,
+						},
+					}),
+				}, nil
+			}
+		}
+
+		// If we reach this point, then we need to actually pull the
+		// supply commitment(s) from a universe server.
+		//
+		// Retrieve the canonical universe list from the latest
+		// metadata for the asset group.
+		metadata, err := supplycommit.FetchLatestAssetMetadata(
+			ctx, env.AssetLookup, env.AssetSpec,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("unable to fetch latest asset "+
+				"metadata: %w", err)
+		}
+
+		canonicalUniverses := metadata.CanonicalUniverses.UnwrapOr(
+			[]url.URL{},
+		)
+
+		log.Debugf("Syncing supply commitment (asset=%s)",
+			env.AssetSpec.String())
+		res, err := env.SupplySyncer.PullSupplyCommitment(
+			ctx, env.AssetSpec, e.SpentCommitOutpoint,
+			canonicalUniverses,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("unable to pull supply "+
+				"commitment: %w", err)
+		}
+
+		// Verify the pulled commitment.
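+		// Nothing is persisted until verification succeeds; an
+		// invalid commitment aborts this transition with an error.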
+ supplyCommit, err := res.FetchResult.UnwrapOrErr( + fmt.Errorf("no commitment found"), + ) + if err != nil { + return nil, err + } + + verifier, err := NewVerifier( + VerifierCfg{ + ChainBridge: env.Chain, + AssetLookup: env.AssetLookup, + Lnd: env.Lnd, + GroupFetcher: env.GroupFetcher, + SupplyCommitView: env.SupplyCommitView, + SupplyTreeView: env.SupplyTreeView, + }, + ) + if err != nil { + return nil, fmt.Errorf("unable to create verifier: %w", + err) + } + + err = verifier.VerifyCommit( + ctx, env.AssetSpec, supplyCommit.RootCommitment, + supplyCommit.SupplyLeaves, + ) + if err != nil { + return nil, fmt.Errorf("unable to verify supply "+ + "commitment: %w", err) + } + + // Store the verified commitment. + err = env.SupplyCommitView.InsertSupplyCommit( + ctx, env.AssetSpec, supplyCommit.RootCommitment, + supplyCommit.SupplyLeaves, + ) + if err != nil { + return nil, fmt.Errorf("unable to store supply "+ + "commitment: %w", err) + } + + // After syncing, verifying, and storing the latest supply + // commitment, transition to the watch state to await its spend. + watchEvent := WatchOutputsEvent{ + SupplyCommit: &supplyCommit.RootCommitment, + } + return &StateTransition{ + NextState: &WatchOutputsState{}, + NewEvents: lfn.Some(FsmEvent{ + InternalEvent: []Event{ + &watchEvent, + }, + }), + }, nil + + case *SpendEvent: + // A watched output has been spent, so transition to the sync + // state to fetch the new supply commitment. Before syncing, + // apply a delay to give the issuer time to publish it. + switch { + case e.WatchStartTimestamp.IsZero(): + // No watch start timestamp: wait the full sync delay. + time.Sleep(env.SpendSyncDelay) + + default: + // With a watch start timestamp: wait only the remaining + // time if the elapsed time is less than the sync delay. + timeSinceWatch := time.Since(e.WatchStartTimestamp) + if timeSinceWatch < env.SpendSyncDelay { + delay := env.SpendSyncDelay - timeSinceWatch + time.Sleep(delay) + } + } + + // After the wait, transition to the sync state to fetch the new + // supply commitment. + var spentCommitOutpoint fn.Option[wire.OutPoint] + if e.SpentSupplyCommitment != nil { + spentCommitOutpoint = fn.Some( + e.SpentSupplyCommitment.CommitPoint(), + ) + } + + syncEvent := SyncVerifyEvent{ + SpentCommitOutpoint: spentCommitOutpoint, + } + return &StateTransition{ + NextState: &SyncVerifyState{}, + NewEvents: lfn.Some(FsmEvent{ + InternalEvent: []Event{ + &syncEvent, + }, + }), + }, nil + + default: + return nil, fmt.Errorf("%w: received %T while in %T", + ErrInvalidStateTransition, event, s) + } +} + +// ProcessEvent handles the state transition for the WatchOutputsState. +func (s *WatchOutputsState) ProcessEvent(event Event, + env *Environment) (*StateTransition, error) { + + switch e := event.(type) { + case *WatchOutputsEvent: + preCommits := e.PreCommits + + // If no pre-commitments were provided, then we'll query our + // local view for the set of unspent pre-commitments. + if len(preCommits) == 0 { + var ( + ctx = context.Background() + err error + ) + + preCommits, err = + env.SupplyCommitView.UnspentPrecommits( + ctx, env.AssetSpec, false, + ).Unpack() + if err != nil { + return nil, fmt.Errorf("unable to fetch "+ + "unspent pre-commitments: %w", err) + } + } + + // Timestamp marking when output watching begins. Stored in the + // spend event to calculate watch duration when a spend + // notification arrives. 
+	watchStartTimestamp := time.Now().UTC()
+
+	// Formulate registered spend events for each of the
+	// pre-commitment outputs that should be watched.
+	events := make(protofsm.DaemonEventSet, 0, len(preCommits)+1)
+	for idx := range preCommits {
+		preCommit := preCommits[idx]
+
+		txOut := preCommit.MintingTxn.TxOut[preCommit.OutIdx]
+		mapper := func(spend *chainntnfs.SpendDetail) Event {
+			// nolint: lll
+			return &SpendEvent{
+				SpendDetail:         spend,
+				SpentPreCommitment:  &preCommit,
+				PreCommitments:      preCommits,
+				WatchStartTimestamp: watchStartTimestamp,
+			}
+		}
+
+		events = append(events, &protofsm.RegisterSpend[Event]{
+			OutPoint:   preCommit.OutPoint(),
+			PkScript:   txOut.PkScript,
+			HeightHint: preCommit.BlockHeight,
+			PostSpendEvent: lfn.Some(
+				protofsm.SpendMapper[Event](mapper),
+			),
+		})
+	}
+
+	// If a supply commitment was provided, we'll also register a
+	// spend event for its output.
+	if e.SupplyCommit != nil {
+		outpoint := wire.OutPoint{
+			Hash:  e.SupplyCommit.Txn.TxHash(),
+			Index: e.SupplyCommit.TxOutIdx,
+		}
+		txOutIdx := e.SupplyCommit.TxOutIdx
+		txOut := e.SupplyCommit.Txn.TxOut[txOutIdx]
+
+		sc := e.SupplyCommit
+		mapper := func(spend *chainntnfs.SpendDetail) Event {
+			// nolint: lll
+			return &SpendEvent{
+				SpendDetail:           spend,
+				SpentSupplyCommitment: sc,
+				PreCommitments:        preCommits,
+				WatchStartTimestamp:   watchStartTimestamp,
+			}
+		}
+
+		events = append(events, &protofsm.RegisterSpend[Event]{
+			OutPoint: outpoint,
+			PkScript: txOut.PkScript,
+			PostSpendEvent: lfn.Some(
+				protofsm.SpendMapper[Event](mapper),
+			),
+		})
+	}
+
+	// With all spend watchers registered, transition to the
+	// sync-verify state to await a spend of one of the outputs
+	// we're watching.
+	return &StateTransition{
+		NextState: &SyncVerifyState{},
+		NewEvents: lfn.Some(FsmEvent{
+			ExternalEvents: events,
+		}),
+	}, nil
+
+	default:
+		return nil, fmt.Errorf("%w: received %T while in %T",
+			ErrInvalidStateTransition, event, s)
+	}
+}

From e0292dd15fd2d5c9bcad6ce8387e421ea096f6b3 Mon Sep 17 00:00:00 2001
From: ffranr
Date: Thu, 11 Sep 2025 15:26:34 +0100
Subject: [PATCH 22/23] itest: extend testSupplyCommitIgnoreAsset to check peer
 commit retrieval

Extend the testSupplyCommitIgnoreAsset integration test to verify that
the verifying peer node correctly processes the supply commitment and
that the commitment can be retrieved from this peer.
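
Since the peer's verifier state machine processes and stores the
commitment asynchronously, the test polls the peer until the fetch
succeeds, roughly along these lines (simplified; the real predicate
below also retries on "commitment not found" and waits for the
commitment transaction's confirmation details, and req stands in for
the FetchSupplyCommitRequest built in the test body):

    require.Eventually(t.t, func() bool {
            resp, err := secondTapd.FetchSupplyCommit(ctxb, req)
            if err != nil {
                    return false
            }
            return resp.IgnoreSubtreeRoot != nil
    }, defaultWaitTimeout, time.Second)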
--- itest/supply_commit_test.go | 55 +++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/itest/supply_commit_test.go b/itest/supply_commit_test.go index 1eaf1429d..9413a942a 100644 --- a/itest/supply_commit_test.go +++ b/itest/supply_commit_test.go @@ -3,6 +3,7 @@ package itest import ( "bytes" "context" + "strings" "time" "github.com/btcsuite/btcd/btcec/v2" @@ -586,6 +587,60 @@ func testSupplyCommitIgnoreAsset(t *harnessTest) { t.t, rpcAsset.Amount, uniFetchResp.IgnoreSubtreeRoot.RootNode.RootSum, ) + + t.Log("Attempting to fetch supply commit from secondary node") + + var peerFetchResp *unirpc.FetchSupplyCommitResponse + require.Eventually(t.t, func() bool { + // nolint: lll + peerFetchResp, err = secondTapd.FetchSupplyCommit( + ctxb, &unirpc.FetchSupplyCommitRequest{ + GroupKey: &unirpc.FetchSupplyCommitRequest_GroupKeyBytes{ + GroupKeyBytes: groupKeyBytes, + }, + Locator: &unirpc.FetchSupplyCommitRequest_VeryFirst{ + VeryFirst: true, + }, + }, + ) + if err != nil && + strings.Contains(err.Error(), "commitment not found") { + + return false + } + require.NoError(t.t, err) + + // If the fetch response has no block height or hash, + // it means that the supply commitment transaction has not + // been mined yet, so we should retry. + if peerFetchResp.ChainData.BlockHeight == 0 || + len(peerFetchResp.ChainData.BlockHash) == 0 { + + return false + } + + // Once the ignore tree includes the ignored asset outpoint, we + // know that the supply commitment has been updated. + if peerFetchResp.IgnoreSubtreeRoot == nil { + return false + } + + return true + }, defaultWaitTimeout, time.Second) + + require.NotNil(t.t, peerFetchResp) + require.Len(t.t, peerFetchResp.IssuanceLeaves, 1) + require.Len(t.t, peerFetchResp.BurnLeaves, 0) + require.Len(t.t, peerFetchResp.IgnoreLeaves, 2) + + require.EqualValues( + t.t, rpcAsset.Amount, + peerFetchResp.IssuanceLeaves[0].LeafNode.RootSum, + ) + require.EqualValues( + t.t, rpcAsset.Amount, + peerFetchResp.IgnoreSubtreeRoot.RootNode.RootSum, + ) } // AssertInclusionProof checks that the inclusion proof for a given leaf key From aa7dff3b3d953b1fa1420c9b169bf32b65d302e1 Mon Sep 17 00:00:00 2001 From: ffranr Date: Thu, 11 Sep 2025 15:29:09 +0100 Subject: [PATCH 23/23] docs: add release notes --- docs/release-notes/release-notes-0.7.0.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/release-notes/release-notes-0.7.0.md b/docs/release-notes/release-notes-0.7.0.md index cd0b35b9b..c0d8bd30e 100644 --- a/docs/release-notes/release-notes-0.7.0.md +++ b/docs/release-notes/release-notes-0.7.0.md @@ -73,6 +73,7 @@ - https://github.com/lightninglabs/taproot-assets/pull/1675 - https://github.com/lightninglabs/taproot-assets/pull/1674 - https://github.com/lightninglabs/taproot-assets/pull/1784 + - https://github.com/lightninglabs/taproot-assets/pull/1777 - A new [address version 2 was introduced that supports grouped assets and custom (sender-defined)