Merged (23 commits)
74da783  tapdb: migrate supply tables to use 32-byte group keys (ffranr, Sep 10, 2025)
f1bfb7a  tapdb: use schnorr.ParsePubKey/SerializePubKey for group key columns (ffranr, Sep 11, 2025)
74aaae2  supplyverifier: compare group pub keys by equivalence not equality (ffranr, Sep 11, 2025)
8519931  mssmt: add helper function NewProofFromCompressedBytes (ffranr, Sep 9, 2025)
fdd48ec  supplysync_rpc: add support for FetchSupplyCommit RPC endpoint (ffranr, Aug 29, 2025)
bdebf6b  supplyverifier: add supply commit pull functionality to syncer (ffranr, Aug 29, 2025)
f984d6a  supplycommit: refactor by introducing startAssetSM method (ffranr, Aug 29, 2025)
8d13b75  supplyverifier: refactor by introducing startAssetSM method (ffranr, Aug 29, 2025)
f102c40  tapdb: add TapAddressBook.FetchSupplyCommitAssets (ffranr, Aug 29, 2025)
4215821  tapdb: add SQL query QueryLatestSupplyCommitment (ffranr, Sep 1, 2025)
adb6513  tapdb: add method FetchLatestCommitment to SupplyCommitMachine db store (ffranr, Sep 1, 2025)
9e298a9  supplyverifier: init state machines for asset groups with commitments (ffranr, Aug 29, 2025)
fbf60f1  supplycommit: refactor asset check for reuse in supplyverifier package (ffranr, Sep 9, 2025)
c4b21fd  supplyverifier: validate asset group before starting state machine (ffranr, Sep 1, 2025)
1703846  supplyverifier: remove state persistence; state can be regenerated (ffranr, Sep 1, 2025)
98e2697  supplyverifier+tapcfg: add supply verifier manager config validation (ffranr, Sep 10, 2025)
ad3e985  supplyverifier: monitor universe syncer events to start state machines (ffranr, Sep 9, 2025)
fb3db91  supplycommit: use provided asset ID in metadata lookup if set (ffranr, Sep 10, 2025)
d0e9b79  supplycommit: reduce log level of tx/commit output to trace/debug (ffranr, Sep 11, 2025)
9cc7019  supplycommit: avoid mock variable name shadowing with outer scope (ffranr, Sep 11, 2025)
056591e  supplyverifier: rewrite supply verifier state machine (ffranr, Sep 11, 2025)
e0292dd  itest: extend testSupplyCommitIgnoreAsset to check peer commit retrieval (ffranr, Sep 11, 2025)
aa7dff3  docs: add release notes (ffranr, Sep 11, 2025)
1 change: 1 addition & 0 deletions docs/release-notes/release-notes-0.7.0.md
@@ -73,6 +73,7 @@
- https://github.com/lightninglabs/taproot-assets/pull/1675
- https://github.com/lightninglabs/taproot-assets/pull/1674
- https://github.com/lightninglabs/taproot-assets/pull/1784
- https://github.com/lightninglabs/taproot-assets/pull/1777

- A new [address version 2 was introduced that supports grouped assets and
custom (sender-defined)
55 changes: 55 additions & 0 deletions itest/supply_commit_test.go
@@ -3,6 +3,7 @@ package itest
import (
	"bytes"
	"context"
	"strings"
	"time"

	"github.com/btcsuite/btcd/btcec/v2"
@@ -586,6 +587,60 @@ func testSupplyCommitIgnoreAsset(t *harnessTest) {
		t.t, rpcAsset.Amount,
		uniFetchResp.IgnoreSubtreeRoot.RootNode.RootSum,
	)

	t.Log("Attempting to fetch supply commit from secondary node")

	var peerFetchResp *unirpc.FetchSupplyCommitResponse
	require.Eventually(t.t, func() bool {
Review comment (Member): 👍

Review comment (Member): We should wrap this in a helper assertion (optional VerifyFirst) and apply it to the other test as well, since that creates several supply commitments.

(A sketch of such a helper follows this file's diff.)
		// nolint: lll
		peerFetchResp, err = secondTapd.FetchSupplyCommit(
			ctxb, &unirpc.FetchSupplyCommitRequest{
				GroupKey: &unirpc.FetchSupplyCommitRequest_GroupKeyBytes{
					GroupKeyBytes: groupKeyBytes,
				},
				Locator: &unirpc.FetchSupplyCommitRequest_VeryFirst{
					VeryFirst: true,
				},
			},
		)
		if err != nil &&
			strings.Contains(err.Error(), "commitment not found") {

			return false
		}
		require.NoError(t.t, err)

		// If the fetch response has no block height or hash, it
		// means that the supply commitment transaction has not
		// been mined yet, so we should retry.
		if peerFetchResp.ChainData.BlockHeight == 0 ||
			len(peerFetchResp.ChainData.BlockHash) == 0 {

			return false
		}

		// Once the ignore tree includes the ignored asset
		// outpoint, we know that the supply commitment has been
		// updated.
		if peerFetchResp.IgnoreSubtreeRoot == nil {
			return false
		}

		return true
	}, defaultWaitTimeout, time.Second)

	require.NotNil(t.t, peerFetchResp)
	require.Len(t.t, peerFetchResp.IssuanceLeaves, 1)
	require.Len(t.t, peerFetchResp.BurnLeaves, 0)
	require.Len(t.t, peerFetchResp.IgnoreLeaves, 2)

	require.EqualValues(
		t.t, rpcAsset.Amount,
		peerFetchResp.IssuanceLeaves[0].LeafNode.RootSum,
	)
	require.EqualValues(
		t.t, rpcAsset.Amount,
		peerFetchResp.IgnoreSubtreeRoot.RootNode.RootSum,
	)
}

// AssertInclusionProof checks that the inclusion proof for a given leaf key
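The review comment above suggests factoring the polling fetch into a reusable helper assertion. A minimal sketch of what such a helper might look like, assuming the itest package's existing tapdHarness type and defaultWaitTimeout constant; the helper name and exact signature are hypothetical and not part of this PR:

// assertSupplyCommitFetched is a hypothetical helper that polls
// FetchSupplyCommit on the given node until it returns a mined
// commitment with a populated ignore subtree root. If veryFirst is
// true, the very first commitment for the group is requested.
func assertSupplyCommitFetched(t *harnessTest, tapd *tapdHarness,
	groupKeyBytes []byte,
	veryFirst bool) *unirpc.FetchSupplyCommitResponse {

	ctxb := context.Background()

	var resp *unirpc.FetchSupplyCommitResponse
	require.Eventually(t.t, func() bool {
		req := &unirpc.FetchSupplyCommitRequest{
			GroupKey: &unirpc.FetchSupplyCommitRequest_GroupKeyBytes{
				GroupKeyBytes: groupKeyBytes,
			},
		}
		if veryFirst {
			req.Locator = &unirpc.FetchSupplyCommitRequest_VeryFirst{
				VeryFirst: true,
			}
		}

		var err error
		resp, err = tapd.FetchSupplyCommit(ctxb, req)
		if err != nil && strings.Contains(
			err.Error(), "commitment not found",
		) {

			return false
		}
		require.NoError(t.t, err)

		// Keep polling until the commitment transaction is mined
		// and the ignore subtree root is populated.
		return resp.ChainData.BlockHeight != 0 &&
			len(resp.ChainData.BlockHash) != 0 &&
			resp.IgnoreSubtreeRoot != nil
	}, defaultWaitTimeout, time.Second)

	return resp
}

With such a helper, the polling block above would reduce to a single call, e.g. peerFetchResp := assertSupplyCommitFetched(t, secondTapd, groupKeyBytes, true).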
15 changes: 15 additions & 0 deletions mssmt/node.go
@@ -4,6 +4,7 @@ import (
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"fmt"
)

const (
@@ -23,6 +24,20 @@
// NodeHash represents the key of a MS-SMT node.
type NodeHash [hashSize]byte

// NewNodeHashFromBytes creates a new NodeHash from a byte slice.
func NewNodeHashFromBytes(b []byte) (NodeHash, error) {
	var zero NodeHash

	if len(b) != hashSize {
		return zero, fmt.Errorf("invalid hash size: %d", len(b))
	}

	var h NodeHash
	copy(h[:], b)

	return h, nil
}

// String returns a NodeHash as a hex-encoded string.
func (k NodeHash) String() string {
	return hex.EncodeToString(k[:])
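A brief usage sketch for the new constructor, as it might be used when loading a root hash from a database column; rawBytes is an illustrative variable, not code from this PR:

// Convert a raw 32-byte value into a typed NodeHash; a wrong-sized
// slice is surfaced as an error rather than silently truncated.
rootHash, err := mssmt.NewNodeHashFromBytes(rawBytes)
if err != nil {
	return fmt.Errorf("invalid root hash: %w", err)
}
fmt.Printf("root: %v\n", rootHash)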
33 changes: 33 additions & 0 deletions mssmt/proof.go
@@ -1,6 +1,7 @@
package mssmt

import (
"bytes"
"errors"
"fmt"

@@ -41,6 +42,38 @@ func NewProof(nodes []Node) *Proof {
	}
}

// NewProofFromCompressedBytes initializes a new merkle proof from its
// compressed byte representation.
func NewProofFromCompressedBytes(compressedProofBytes []byte) (Proof, error) {
Review comment (Member): Are you aware that we already have this?

// Decode decodes the compressed proof encoded within Reader.
func (p *CompressedProof) Decode(r io.Reader) error {
	var numNodes uint16
	if err := binary.Read(r, byteOrder, &numNodes); err != nil {
		return err
	}

	nodes := make([]Node, 0, numNodes)
	for i := uint16(0); i < numNodes; i++ {
		var keyBytes [sha256.Size]byte
		if _, err := r.Read(keyBytes[:]); err != nil {
			return err
		}

		var sum uint64
		if err := binary.Read(r, byteOrder, &sum); err != nil {
			return err
		}

		nodes = append(nodes, NewComputedNode(NodeHash(keyBytes), sum))
	}

	var bitsBytes [MaxTreeLevels / 8]byte
	if _, err := r.Read(bitsBytes[:]); err != nil {
		return err
	}
	bits := UnpackBits(bitsBytes[:])

	*p = CompressedProof{
		Bits:  bits,
		Nodes: nodes,
	}

	return nil
}

	var zero Proof

	if len(compressedProofBytes) == 0 {
		return zero, fmt.Errorf("compressed proof bytes are empty")
	}

	var compressedProof CompressedProof
	reader := bytes.NewReader(compressedProofBytes)
	if err := compressedProof.Decode(reader); err != nil {
		return zero, fmt.Errorf("decode compressed proof: %w", err)
	}

	// Fail if extra data follows a valid proof encoding.
	if remaining := reader.Len(); remaining != 0 {
		return zero, fmt.Errorf("trailing data after compressed "+
			"proof: %d bytes", remaining)
	}

	p, err := compressedProof.Decompress()
	if err != nil {
		return zero, fmt.Errorf("decompress proof: %w", err)
	}
	if p == nil {
		return zero, fmt.Errorf("decompressor returned nil proof")
	}

	return *p, nil
}

// Root returns the root node obtained by walking up the tree.
func (p Proof) Root(key [32]byte, leaf Node) *BranchNode {
	// Note that we don't need to check the error here since the only point
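A short usage sketch for the new constructor, assuming compressedBytes was taken from an RPC response such as FetchSupplyCommit, and that leafKey, leaf, and expectedRoot identify the leaf being proven and the trusted root (all illustrative); IsEqualNode is assumed to be the package's existing node comparison helper:

// Decode and sanity-check the compressed proof in one call.
proof, err := mssmt.NewProofFromCompressedBytes(compressedBytes)
if err != nil {
	return fmt.Errorf("invalid proof: %w", err)
}

// Recompute the root implied by the proof for the given leaf, then
// compare it against the trusted commitment root.
root := proof.Root(leafKey, leaf)
if !mssmt.IsEqualNode(root, expectedRoot) {
	return fmt.Errorf("proof root mismatch")
}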