From 6f9a19bb0aee35afc47d69b881f5635233256372 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 2 May 2025 20:43:18 -0700 Subject: [PATCH 1/8] universe: add Encode+Decode methods for BurnLeaf --- universe/interface.go | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/universe/interface.go b/universe/interface.go index a999488f4..d4a550005 100644 --- a/universe/interface.go +++ b/universe/interface.go @@ -6,6 +6,7 @@ import ( "crypto/sha256" "encoding/hex" "fmt" + "io" "net" "strconv" "time" @@ -1295,6 +1296,31 @@ func (b *BurnLeaf) UniverseLeafNode() (*mssmt.LeafNode, error) { return mssmt.NewLeafNode(rawProofBytes, b.BurnProof.Asset.Amount), nil } +// Encode encodes the burn leaf into the target writer. +func (b *BurnLeaf) Encode(w io.Writer) error { + return b.BurnProof.Encode(w) +} + +// Decode decodes the burn leaf from the target reader. +func (b *BurnLeaf) Decode(r io.Reader) error { + burnProof := new(proof.Proof) + if err := burnProof.Decode(r); err != nil { + return fmt.Errorf("unable to decode burn proof: %w", err) + } + + b.BurnProof = burnProof + + b.UniverseKey = AssetLeafKey{ + BaseLeafKey: BaseLeafKey{ + OutPoint: b.BurnProof.OutPoint(), + ScriptKey: &b.BurnProof.Asset.ScriptKey, + }, + AssetID: b.BurnProof.Asset.ID(), + } + + return nil +} + // AuthenticatedBurnLeaf is a type that represents a burn leaf within the // Universe tree. This includes the MS-SMT inclusion proofs. type AuthenticatedBurnLeaf struct { @@ -1360,3 +1386,9 @@ type BurnTree interface { // ListBurns attempts to list all burn leaves for the given asset. ListBurns(context.Context, asset.Specifier) ListBurnsResp } + +// UniverseLeaf is an interface that allows a caller to query for the leaf node +// of a given Universe tree. +type UniverseLeaf interface { + UniverseLeafNode() (*mssmt.LeafNode, error) +} From 000226c21631647c07cc7ead36e834b8386fe09a Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 2 May 2025 20:46:38 -0700 Subject: [PATCH 2/8] tapdb/sqlc: add new migration for the persistence layer of the supply tree state machine In this commit, we add a new migration for the persistence layer of the supply tree state machine. We track a state machine that points to the latest supply commitment. We update the supply commit via a new state transition, which references a series of updates. Once we're ready to apply, we'll apply the updates, mark the transition as finalized, and finally update the relevant pointers. We also make a change to modify mint_anchor_uni_commitments to point to a supply commitment. This lets us keep track of the set of pre commitments that aren't yet spent. --- tapdb/migrations.go | 2 +- .../migrations/000040_asset_commit.down.sql | 17 +++ .../migrations/000040_asset_commit.up.sql | 133 ++++++++++++++++++ 3 files changed, 151 insertions(+), 1 deletion(-) create mode 100644 tapdb/sqlc/migrations/000040_asset_commit.down.sql create mode 100644 tapdb/sqlc/migrations/000040_asset_commit.up.sql diff --git a/tapdb/migrations.go b/tapdb/migrations.go index ba06ffbd6..43b9a045e 100644 --- a/tapdb/migrations.go +++ b/tapdb/migrations.go @@ -24,7 +24,7 @@ const ( // daemon. // // NOTE: This MUST be updated when a new migration is added. 
-	LatestMigrationVersion = 39
+	LatestMigrationVersion = 40
 )

 // DatabaseBackend is an interface that contains all methods our different
diff --git a/tapdb/sqlc/migrations/000040_asset_commit.down.sql b/tapdb/sqlc/migrations/000040_asset_commit.down.sql
new file mode 100644
index 000000000..4bf7afec3
--- /dev/null
+++ b/tapdb/sqlc/migrations/000040_asset_commit.down.sql
@@ -0,0 +1,17 @@
+DROP INDEX IF EXISTS supply_commit_transitions_single_pending_idx;
+DROP INDEX IF EXISTS supply_update_events_transition_id_idx;
+DROP INDEX IF EXISTS supply_commit_transitions_state_machine_group_key_idx;
+DROP INDEX IF EXISTS supply_commitments_chain_txn_id_idx;
+DROP INDEX IF EXISTS supply_commitments_group_key_idx;
+
+ALTER TABLE mint_anchor_uni_commitments DROP COLUMN spent_by;
+
+ALTER TABLE supply_commitments DROP COLUMN supply_root_hash;
+ALTER TABLE supply_commitments DROP COLUMN supply_root_sum;
+
+DROP TABLE IF EXISTS supply_update_events;
+DROP TABLE IF EXISTS supply_commit_transitions;
+DROP TABLE IF EXISTS supply_commit_state_machines;
+DROP TABLE IF EXISTS supply_commitments;
+DROP TABLE IF EXISTS supply_commit_update_types;
+DROP TABLE IF EXISTS supply_commit_states;
diff --git a/tapdb/sqlc/migrations/000040_asset_commit.up.sql b/tapdb/sqlc/migrations/000040_asset_commit.up.sql
new file mode 100644
index 000000000..c299538cc
--- /dev/null
+++ b/tapdb/sqlc/migrations/000040_asset_commit.up.sql
@@ -0,0 +1,133 @@
+-- Enum-like table for state machine states.
+CREATE TABLE supply_commit_states (
+    id INTEGER PRIMARY KEY,
+    state_name TEXT UNIQUE NOT NULL
+);
+
+-- Populate the possible states.
+INSERT INTO supply_commit_states (id, state_name) VALUES
+    (0, 'DefaultState'),
+    (1, 'UpdatesPendingState'),
+    (2, 'CommitTreeCreateState'),
+    (3, 'CommitTxCreateState'),
+    (4, 'CommitTxSignState'),
+    (5, 'CommitBroadcastState'),
+    (6, 'CommitFinalizeState');
+
+-- Enum-like table for supply update event types.
+CREATE TABLE supply_commit_update_types (
+    id INTEGER PRIMARY KEY,
+    update_type_name TEXT UNIQUE NOT NULL
+);
+
+-- Populate the possible update types.
+INSERT INTO supply_commit_update_types (id, update_type_name) VALUES
+    (0, 'mint'),
+    (1, 'burn'),
+    (2, 'ignore');
+
+-- Table storing the details of a specific supply commitment (root and sub-trees).
+-- This represents a committed state on chain.
+CREATE TABLE supply_commitments (
+    commit_id INTEGER PRIMARY KEY,
+
+    -- The tweaked group key identifying the asset group this commitment belongs to.
+    group_key BLOB NOT NULL CHECK(length(group_key) = 33),
+
+    -- The chain transaction that included this commitment.
+    chain_txn_id BIGINT NOT NULL REFERENCES chain_txns(txn_id),
+
+    -- The output index within the chain_txn_id transaction for the commitment.
+    output_index INTEGER,
+
+    -- The internal key used for the commitment output.
+    internal_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id),
+
+    -- The taproot output key used for the commitment output.
+    output_key BLOB NOT NULL CHECK(length(output_key) = 33),
+
+    -- The block header of the block mining the commitment transaction.
+    block_header BLOB,
+
+    -- The block height at which the commitment transaction was confirmed.
+    -- Can be NULL if the transaction is not yet confirmed.
+    block_height INTEGER,
+
+    -- The merkle proof demonstrating the commitment's inclusion in the block.
+    merkle_proof BLOB,
+
+    -- The root hash of the supply commitment at this snapshot.
+    supply_root_hash BLOB,
+
+    -- The root sum of the supply commitment at this snapshot.
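+    -- Together with supply_root_hash above, this pins the MS-SMT root node
+    -- (hash and sum) of the supply tree that this commitment anchors on
+    -- chain.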
+ supply_root_sum BIGINT +); + +-- Main table tracking the state machine instance per asset group. +CREATE TABLE supply_commit_state_machines ( + -- The tweaked group key identifying the asset group's state machine. + group_key BLOB PRIMARY KEY CHECK(length(group_key) = 33), + + -- The current state of the state machine. + current_state_id INTEGER NOT NULL REFERENCES supply_commit_states(id), + + -- The latest successfully committed supply state on chain. + -- Can be NULL if no commitment has been made yet. + latest_commitment_id BIGINT REFERENCES supply_commitments(commit_id) +); + +-- Table tracking a pending state transition for a state machine. +CREATE TABLE supply_commit_transitions ( + transition_id INTEGER PRIMARY KEY, + + -- Reference back to the state machine this transition belongs to. + state_machine_group_key BLOB NOT NULL REFERENCES supply_commit_state_machines(group_key), + + -- The commitment being replaced by this transition. + -- Can be NULL if this is the first commitment. + old_commitment_id BIGINT REFERENCES supply_commitments(commit_id), + + -- The new commitment that this transition aims to create. + -- Can be NULL initially, before the commitment details are created. + new_commitment_id BIGINT REFERENCES supply_commitments(commit_id), + + -- The chain transaction that, once confirmed, will finalize this transition. + -- Can be NULL until the transaction is created and signed. + pending_commit_txn_id BIGINT REFERENCES chain_txns(txn_id), + + -- Indicates if this transition has been successfully completed and committed. + finalized BOOLEAN NOT NULL DEFAULT FALSE, + + -- Timestamp when this transition was initiated (unix timestamp in seconds). + creation_time BIGINT NOT NULL +); + +-- Table storing individual update events associated with a pending transition. +CREATE TABLE supply_update_events ( + event_id INTEGER PRIMARY KEY, + + -- Reference to the state transition this event is part of. + transition_id BIGINT NOT NULL REFERENCES supply_commit_transitions(transition_id) ON DELETE CASCADE, + + -- The type of update (mint, burn, ignore). + update_type_id INTEGER NOT NULL REFERENCES supply_commit_update_types(id), + + -- Opaque blob containing the serialized data for the specific + -- SupplyUpdateEvent (NewMintEvent, NewBurnEvent, NewIgnoreEvent). + event_data BLOB NOT NULL +); + +-- In order to be able to easily fetch the set of unspent pre-commitment +-- outputs, we'll add a new spent_by field to mint_anchor_uni_commitments. +ALTER TABLE mint_anchor_uni_commitments + ADD COLUMN spent_by BIGINT REFERENCES supply_commitments(commit_id); + +-- Add indexes for frequent lookups. +CREATE INDEX supply_commitments_chain_txn_id_idx ON supply_commitments(chain_txn_id); +CREATE INDEX supply_commit_transitions_state_machine_group_key_idx ON supply_commit_transitions(state_machine_group_key); +CREATE INDEX supply_update_events_transition_id_idx ON supply_update_events(transition_id); +CREATE INDEX supply_commitments_group_key_idx ON supply_commitments(group_key); + +-- Ensure only one non-finalized transition exists per state machine group key. 
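+-- Because the index below is partial (it only covers rows WHERE
+-- finalized = FALSE), inserting a second in-flight transition for the same
+-- group key fails at the database layer; the existing transition must be
+-- finalized or deleted first.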
+CREATE UNIQUE INDEX supply_commit_transitions_single_pending_idx + ON supply_commit_transitions (state_machine_group_key) WHERE finalized = FALSE; From b8979dafca40ab11e35ac98cd552a36c60fefb7a Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 2 May 2025 20:46:57 -0700 Subject: [PATCH 3/8] tapdb/sqlc: update mint_anchor_uni_commitments queries to account for new spent_by field --- tapdb/sqlc/queries/assets.sql | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tapdb/sqlc/queries/assets.sql b/tapdb/sqlc/queries/assets.sql index 536b868db..bc6427381 100644 --- a/tapdb/sqlc/queries/assets.sql +++ b/tapdb/sqlc/queries/assets.sql @@ -1040,10 +1040,10 @@ JOIN assets_meta ON assets.meta_data_id = assets_meta.meta_id WHERE assets.asset_id = $1; +-- name: UpsertMintAnchorUniCommitment :one -- Upsert a record into the mint_anchor_uni_commitments table. -- If a record with the same batch ID and tx output index already exists, update -- the existing record. Otherwise, insert a new record. --- name: UpsertMintAnchorUniCommitment :one WITH target_batch AS ( -- This CTE is used to fetch the ID of a batch, based on the serialized -- internal key associated with the batch. @@ -1052,11 +1052,11 @@ WITH target_batch AS ( WHERE keys.raw_key = @batch_key ) INSERT INTO mint_anchor_uni_commitments ( - batch_id, tx_output_index, taproot_internal_key_id, group_key + batch_id, tx_output_index, taproot_internal_key_id, group_key, spent_by ) VALUES ( - (SELECT batch_id FROM target_batch), @tx_output_index, - @taproot_internal_key_id, @group_key + (SELECT batch_id FROM target_batch), @tx_output_index, + @taproot_internal_key_id, @group_key, sqlc.narg('spent_by') ) ON CONFLICT(batch_id, tx_output_index) DO UPDATE SET -- The following fields are updated if a conflict occurs. @@ -1064,14 +1064,15 @@ ON CONFLICT(batch_id, tx_output_index) DO UPDATE SET group_key = EXCLUDED.group_key RETURNING id; +-- name: FetchMintAnchorUniCommitment :many -- Fetch records from the mint_anchor_uni_commitments table with optional -- filtering. --- name: FetchMintAnchorUniCommitment :many SELECT mint_anchor_uni_commitments.id, mint_anchor_uni_commitments.batch_id, mint_anchor_uni_commitments.tx_output_index, mint_anchor_uni_commitments.group_key, + mint_anchor_uni_commitments.spent_by, batch_internal_keys.raw_key AS batch_key, mint_anchor_uni_commitments.taproot_internal_key_id, sqlc.embed(taproot_internal_keys) From 48e6c153ac618d631ac8868106efd229b67b7d1f Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 2 May 2025 20:47:13 -0700 Subject: [PATCH 4/8] tapdb/sqlc: add new supply_commit.sql queries --- tapdb/sqlc/queries/supply_commit.sql | 192 +++++++++++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100644 tapdb/sqlc/queries/supply_commit.sql diff --git a/tapdb/sqlc/queries/supply_commit.sql b/tapdb/sqlc/queries/supply_commit.sql new file mode 100644 index 000000000..e97b2d067 --- /dev/null +++ b/tapdb/sqlc/queries/supply_commit.sql @@ -0,0 +1,192 @@ +-- name: UpsertSupplyCommitStateMachine :one +WITH target_state AS ( + -- Select the ID for the provided state name, if it exists. + SELECT id + FROM supply_commit_states s1 + WHERE s1.state_name = sqlc.narg('state_name') +), default_state AS ( + -- Select the ID for the 'DefaultState'. 
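+    -- This is used as the fallback below when the caller does not supply a
+    -- state_name.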
+ SELECT id + FROM supply_commit_states s2 + WHERE s2.state_name = 'DefaultState' +) +INSERT INTO supply_commit_state_machines ( + group_key, current_state_id, latest_commitment_id +) VALUES ( + @group_key, + -- Use the target state ID if found, otherwise use the default state ID. + coalesce((SELECT id FROM target_state), (SELECT id FROM default_state)), + sqlc.narg('latest_commitment_id') +) +ON CONFLICT (group_key) +DO UPDATE SET + -- Update state ID only if a target state ID was found, otherwise keep existing. + current_state_id = coalesce((SELECT id FROM target_state), supply_commit_state_machines.current_state_id), + latest_commitment_id = coalesce(sqlc.narg('latest_commitment_id'), supply_commit_state_machines.latest_commitment_id) +-- Return the ID of the state that was actually set (either inserted or updated), +-- and the latest commitment ID that was set. +RETURNING current_state_id, latest_commitment_id; + +-- name: InsertSupplyCommitment :one +INSERT INTO supply_commitments ( + group_key, chain_txn_id, + output_index, internal_key_id, output_key, -- Core fields + block_height, block_header, merkle_proof, -- Nullable chain details + supply_root_hash, supply_root_sum -- Nullable root details +) VALUES ( + $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 +) RETURNING commit_id; + +-- name: UpdateSupplyCommitmentChainDetails :exec +UPDATE supply_commitments +SET merkle_proof = @merkle_proof, + output_index = @output_index, + block_header = @block_header, + chain_txn_id = @chain_txn_id, + block_height = @block_height +WHERE commit_id = @commit_id; + +-- name: UpdateSupplyCommitmentRoot :exec +UPDATE supply_commitments +SET supply_root_hash = @supply_root_hash, + supply_root_sum = @supply_root_sum +WHERE commit_id = @commit_id; + +-- name: InsertSupplyCommitTransition :one +INSERT INTO supply_commit_transitions ( + state_machine_group_key, old_commitment_id, new_commitment_id, + pending_commit_txn_id, finalized, creation_time +) VALUES ( + $1, $2, $3, $4, $5, $6 +) RETURNING transition_id; + +-- name: FinalizeSupplyCommitTransition :exec +UPDATE supply_commit_transitions +SET finalized = TRUE +WHERE transition_id = @transition_id; + +-- name: InsertSupplyUpdateEvent :exec +INSERT INTO supply_update_events ( + transition_id, update_type_id, event_data +) VALUES ( + $1, $2, $3 +); + +-- name: QuerySupplyCommitStateMachine :one +SELECT + sm.group_key, + sm.current_state_id, + states.state_name, + sm.latest_commitment_id +FROM supply_commit_state_machines sm +JOIN supply_commit_states states + ON sm.current_state_id = states.id +WHERE sm.group_key = @group_key; + +-- name: QueryPendingSupplyCommitTransition :one +WITH target_machine AS ( + SELECT group_key + FROM supply_commit_state_machines + WHERE group_key = @group_key +) +SELECT + t.transition_id, + t.state_machine_group_key, + t.old_commitment_id, + t.new_commitment_id, + t.pending_commit_txn_id, + t.finalized, + t.creation_time +FROM supply_commit_transitions t +JOIN target_machine tm + ON t.state_machine_group_key = tm.group_key +WHERE t.finalized = FALSE +ORDER BY t.creation_time DESC +LIMIT 1; + +-- name: QuerySupplyUpdateEvents :many +SELECT + ue.event_id, + ue.transition_id, + ue.update_type_id, + types.update_type_name, + ue.event_data +FROM supply_update_events ue +JOIN supply_commit_update_types types + ON ue.update_type_id = types.id +WHERE ue.transition_id = @transition_id +ORDER BY ue.event_id ASC; + +-- name: QuerySupplyCommitment :one +SELECT * +FROM supply_commitments +WHERE commit_id = @commit_id; + +-- name: 
UpdateSupplyCommitTransitionCommitment :exec +UPDATE supply_commit_transitions +SET new_commitment_id = @new_commitment_id, + pending_commit_txn_id = @pending_commit_txn_id +WHERE transition_id = @transition_id; + +-- name: DeleteSupplyCommitTransition :exec +DELETE FROM supply_commit_transitions +WHERE transition_id = @transition_id; + +-- name: DeleteSupplyUpdateEvents :exec +DELETE FROM supply_update_events +WHERE transition_id = @transition_id; + +-- name: FetchUnspentPrecommits :many +SELECT + mac.tx_output_index, + ik.raw_key AS taproot_internal_key, + mac.group_key, + mint_txn.block_height, + mint_txn.raw_tx +FROM mint_anchor_uni_commitments mac +JOIN asset_minting_batches amb ON mac.batch_id = amb.batch_id +JOIN genesis_points gp ON amb.genesis_id = gp.genesis_id +JOIN chain_txns mint_txn ON gp.anchor_tx_id = mint_txn.txn_id +JOIN internal_keys ik ON mac.taproot_internal_key_id = ik.key_id +LEFT JOIN supply_commitments sc ON mac.spent_by = sc.commit_id +LEFT JOIN chain_txns commit_txn ON sc.chain_txn_id = commit_txn.txn_id +WHERE + mac.group_key = @group_key AND + (mac.spent_by IS NULL OR commit_txn.block_hash IS NULL); + +-- name: FetchSupplyCommit :one +SELECT + sc.commit_id, + sc.output_index, + sc.output_key, + ik.raw_key AS internal_key, + txn.raw_tx, + sc.supply_root_hash AS root_hash, + sc.supply_root_sum AS root_sum +FROM supply_commit_state_machines sm +JOIN supply_commitments sc + ON sm.latest_commitment_id = sc.commit_id +JOIN chain_txns txn + ON sc.chain_txn_id = txn.txn_id +JOIN internal_keys ik + ON sc.internal_key_id = ik.key_id +WHERE + sm.group_key = @group_key AND + txn.block_hash IS NOT NULL; + +-- name: QueryExistingPendingTransition :one +-- Find the ID of an existing non-finalized transition for the group key +SELECT transition_id +FROM supply_commit_transitions sct +WHERE sct.state_machine_group_key = @group_key AND finalized = FALSE +LIMIT 1; + +-- name: FetchInternalKeyByID :one +SELECT raw_key, key_family, key_index +FROM internal_keys +WHERE key_id = @key_id; + +-- name: FetchChainTxByID :one +SELECT raw_tx, block_height -- Include block_height needed by FetchState +FROM chain_txns +WHERE txn_id = @txn_id; From 4baffdfe6b5dbf619474c4ce2475de3f5f51aef9 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 2 May 2025 20:47:45 -0700 Subject: [PATCH 5/8] tapdb/sqlc: generate sqlc structs --- tapdb/sqlc/assets.sql.go | 13 +- tapdb/sqlc/models.go | 48 +++ tapdb/sqlc/querier.go | 22 + tapdb/sqlc/schemas/generated_schema.sql | 109 ++++- tapdb/sqlc/supply_commit.sql.go | 548 ++++++++++++++++++++++++ 5 files changed, 735 insertions(+), 5 deletions(-) create mode 100644 tapdb/sqlc/supply_commit.sql.go diff --git a/tapdb/sqlc/assets.sql.go b/tapdb/sqlc/assets.sql.go index 23a5ea693..fdca73471 100644 --- a/tapdb/sqlc/assets.sql.go +++ b/tapdb/sqlc/assets.sql.go @@ -1534,6 +1534,7 @@ SELECT mint_anchor_uni_commitments.batch_id, mint_anchor_uni_commitments.tx_output_index, mint_anchor_uni_commitments.group_key, + mint_anchor_uni_commitments.spent_by, batch_internal_keys.raw_key AS batch_key, mint_anchor_uni_commitments.taproot_internal_key_id, taproot_internal_keys.key_id, taproot_internal_keys.raw_key, taproot_internal_keys.key_family, taproot_internal_keys.key_index @@ -1562,6 +1563,7 @@ type FetchMintAnchorUniCommitmentRow struct { BatchID int32 TxOutputIndex int32 GroupKey []byte + SpentBy sql.NullInt64 BatchKey []byte TaprootInternalKeyID int64 InternalKey InternalKey @@ -1583,6 +1585,7 @@ func (q *Queries) FetchMintAnchorUniCommitment(ctx context.Context, 
arg FetchMin &i.BatchID, &i.TxOutputIndex, &i.GroupKey, + &i.SpentBy, &i.BatchKey, &i.TaprootInternalKeyID, &i.InternalKey.KeyID, @@ -3121,14 +3124,14 @@ WITH target_batch AS ( -- internal key associated with the batch. SELECT keys.key_id AS batch_id FROM internal_keys keys - WHERE keys.raw_key = $4 + WHERE keys.raw_key = $5 ) INSERT INTO mint_anchor_uni_commitments ( - batch_id, tx_output_index, taproot_internal_key_id, group_key + batch_id, tx_output_index, taproot_internal_key_id, group_key, spent_by ) VALUES ( - (SELECT batch_id FROM target_batch), $1, - $2, $3 + (SELECT batch_id FROM target_batch), $1, + $2, $3, $4 ) ON CONFLICT(batch_id, tx_output_index) DO UPDATE SET -- The following fields are updated if a conflict occurs. @@ -3141,6 +3144,7 @@ type UpsertMintAnchorUniCommitmentParams struct { TxOutputIndex int32 TaprootInternalKeyID int64 GroupKey []byte + SpentBy sql.NullInt64 BatchKey []byte } @@ -3152,6 +3156,7 @@ func (q *Queries) UpsertMintAnchorUniCommitment(ctx context.Context, arg UpsertM arg.TxOutputIndex, arg.TaprootInternalKeyID, arg.GroupKey, + arg.SpentBy, arg.BatchKey, ) var id int64 diff --git a/tapdb/sqlc/models.go b/tapdb/sqlc/models.go index c56464faf..206d342f2 100644 --- a/tapdb/sqlc/models.go +++ b/tapdb/sqlc/models.go @@ -296,6 +296,7 @@ type MintAnchorUniCommitment struct { TxOutputIndex int32 GroupKey []byte TaprootInternalKeyID int64 + SpentBy sql.NullInt64 } type MssmtNode struct { @@ -357,6 +358,53 @@ type ScriptKey struct { KeyType sql.NullInt16 } +type SupplyCommitState struct { + ID int64 + StateName string +} + +type SupplyCommitStateMachine struct { + GroupKey []byte + CurrentStateID int32 + LatestCommitmentID sql.NullInt64 +} + +type SupplyCommitTransition struct { + TransitionID int64 + StateMachineGroupKey []byte + OldCommitmentID sql.NullInt64 + NewCommitmentID sql.NullInt64 + PendingCommitTxnID sql.NullInt64 + Finalized bool + CreationTime int64 +} + +type SupplyCommitUpdateType struct { + ID int64 + UpdateTypeName string +} + +type SupplyCommitment struct { + CommitID int64 + GroupKey []byte + ChainTxnID int64 + OutputIndex sql.NullInt32 + InternalKeyID int64 + OutputKey []byte + BlockHeader []byte + BlockHeight sql.NullInt32 + MerkleProof []byte + SupplyRootHash []byte + SupplyRootSum sql.NullInt64 +} + +type SupplyUpdateEvent struct { + EventID int64 + TransitionID int64 + UpdateTypeID int32 + EventData []byte +} + type TapscriptEdge struct { EdgeID int64 RootHashID int64 diff --git a/tapdb/sqlc/querier.go b/tapdb/sqlc/querier.go index 8ab3e6d68..8f4eefc27 100644 --- a/tapdb/sqlc/querier.go +++ b/tapdb/sqlc/querier.go @@ -35,6 +35,8 @@ type Querier interface { DeleteMultiverseLeaf(ctx context.Context, arg DeleteMultiverseLeafParams) error DeleteNode(ctx context.Context, arg DeleteNodeParams) (int64, error) DeleteRoot(ctx context.Context, namespace string) (int64, error) + DeleteSupplyCommitTransition(ctx context.Context, transitionID int64) error + DeleteSupplyUpdateEvents(ctx context.Context, transitionID int64) error DeleteTapscriptTreeEdges(ctx context.Context, rootHash []byte) error DeleteTapscriptTreeNodes(ctx context.Context) error DeleteTapscriptTreeRoot(ctx context.Context, rootHash []byte) error @@ -69,6 +71,7 @@ type Querier interface { FetchAssetsForBatch(ctx context.Context, rawKey []byte) ([]FetchAssetsForBatchRow, error) FetchAuthMailboxMessages(ctx context.Context, id int64) (FetchAuthMailboxMessagesRow, error) FetchChainTx(ctx context.Context, txid []byte) (ChainTxn, error) + FetchChainTxByID(ctx context.Context, txnID int64) 
(FetchChainTxByIDRow, error) FetchChildren(ctx context.Context, arg FetchChildrenParams) ([]FetchChildrenRow, error) FetchChildrenSelfJoin(ctx context.Context, arg FetchChildrenSelfJoinParams) ([]FetchChildrenSelfJoinRow, error) FetchGenesisByAssetID(ctx context.Context, assetID []byte) (GenesisInfoView, error) @@ -80,6 +83,7 @@ type Querier interface { // Sort and limit to return the genesis ID for initial genesis of the group. FetchGroupByGroupKey(ctx context.Context, groupKey []byte) (FetchGroupByGroupKeyRow, error) FetchGroupedAssets(ctx context.Context) ([]FetchGroupedAssetsRow, error) + FetchInternalKeyByID(ctx context.Context, keyID int64) (FetchInternalKeyByIDRow, error) FetchInternalKeyLocator(ctx context.Context, rawKey []byte) (FetchInternalKeyLocatorRow, error) FetchManagedUTXO(ctx context.Context, arg FetchManagedUTXOParams) (FetchManagedUTXORow, error) FetchManagedUTXOs(ctx context.Context) ([]FetchManagedUTXOsRow, error) @@ -95,6 +99,7 @@ type Querier interface { FetchSeedlingByID(ctx context.Context, seedlingID int64) (AssetSeedling, error) FetchSeedlingID(ctx context.Context, arg FetchSeedlingIDParams) (int64, error) FetchSeedlingsForBatch(ctx context.Context, rawKey []byte) ([]FetchSeedlingsForBatchRow, error) + FetchSupplyCommit(ctx context.Context, groupKey []byte) (FetchSupplyCommitRow, error) // Sort the nodes by node_index here instead of returning the indices. FetchTapscriptTree(ctx context.Context, rootHash []byte) ([]FetchTapscriptTreeRow, error) FetchTransferInputs(ctx context.Context, transferID int64) ([]FetchTransferInputsRow, error) @@ -103,6 +108,8 @@ type Querier interface { FetchUniverseRoot(ctx context.Context, namespace string) (FetchUniverseRootRow, error) FetchUniverseSupplyRoot(ctx context.Context, namespaceRoot string) (FetchUniverseSupplyRootRow, error) FetchUnknownTypeScriptKeys(ctx context.Context) ([]FetchUnknownTypeScriptKeysRow, error) + FetchUnspentPrecommits(ctx context.Context, groupKey []byte) ([]FetchUnspentPrecommitsRow, error) + FinalizeSupplyCommitTransition(ctx context.Context, transitionID int64) error GenesisAssets(ctx context.Context) ([]GenesisAsset, error) GenesisPoints(ctx context.Context) ([]GenesisPoint, error) GetRootKey(ctx context.Context, id []byte) (Macaroon, error) @@ -121,6 +128,9 @@ type Querier interface { InsertNewSyncEvent(ctx context.Context, arg InsertNewSyncEventParams) error InsertPassiveAsset(ctx context.Context, arg InsertPassiveAssetParams) error InsertRootKey(ctx context.Context, arg InsertRootKeyParams) error + InsertSupplyCommitTransition(ctx context.Context, arg InsertSupplyCommitTransitionParams) (int64, error) + InsertSupplyCommitment(ctx context.Context, arg InsertSupplyCommitmentParams) (int64, error) + InsertSupplyUpdateEvent(ctx context.Context, arg InsertSupplyUpdateEventParams) error InsertTxProof(ctx context.Context, arg InsertTxProofParams) error InsertUniverseServer(ctx context.Context, arg InsertUniverseServerParams) error LogProofTransferAttempt(ctx context.Context, arg LogProofTransferAttemptParams) error @@ -148,6 +158,8 @@ type Querier interface { QueryAuthMailboxMessages(ctx context.Context, arg QueryAuthMailboxMessagesParams) ([]QueryAuthMailboxMessagesRow, error) QueryBurns(ctx context.Context, arg QueryBurnsParams) ([]QueryBurnsRow, error) QueryEventIDs(ctx context.Context, arg QueryEventIDsParams) ([]QueryEventIDsRow, error) + // Find the ID of an existing non-finalized transition for the group key + QueryExistingPendingTransition(ctx context.Context, groupKey []byte) (int64, error) 
QueryFederationGlobalSyncConfigs(ctx context.Context) ([]FederationGlobalSyncConfig, error) // Join on mssmt_nodes to get leaf related fields. // Join on genesis_info_view to get leaf related fields. @@ -155,7 +167,11 @@ type Querier interface { QueryFederationUniSyncConfigs(ctx context.Context) ([]QueryFederationUniSyncConfigsRow, error) QueryMultiverseLeaves(ctx context.Context, arg QueryMultiverseLeavesParams) ([]QueryMultiverseLeavesRow, error) QueryPassiveAssets(ctx context.Context, transferID int64) ([]QueryPassiveAssetsRow, error) + QueryPendingSupplyCommitTransition(ctx context.Context, groupKey []byte) (SupplyCommitTransition, error) QueryProofTransferAttempts(ctx context.Context, arg QueryProofTransferAttemptsParams) ([]time.Time, error) + QuerySupplyCommitStateMachine(ctx context.Context, groupKey []byte) (QuerySupplyCommitStateMachineRow, error) + QuerySupplyCommitment(ctx context.Context, commitID int64) (SupplyCommitment, error) + QuerySupplyUpdateEvents(ctx context.Context, transitionID int64) ([]QuerySupplyUpdateEventsRow, error) // TODO(roasbeef): use the universe id instead for the grouping? so namespace // root, simplifies queries QueryUniverseAssetStats(ctx context.Context, arg QueryUniverseAssetStatsParams) ([]QueryUniverseAssetStatsRow, error) @@ -171,6 +187,9 @@ type Querier interface { UniverseRoots(ctx context.Context, arg UniverseRootsParams) ([]UniverseRootsRow, error) UpdateBatchGenesisTx(ctx context.Context, arg UpdateBatchGenesisTxParams) error UpdateMintingBatchState(ctx context.Context, arg UpdateMintingBatchStateParams) error + UpdateSupplyCommitTransitionCommitment(ctx context.Context, arg UpdateSupplyCommitTransitionCommitmentParams) error + UpdateSupplyCommitmentChainDetails(ctx context.Context, arg UpdateSupplyCommitmentChainDetailsParams) error + UpdateSupplyCommitmentRoot(ctx context.Context, arg UpdateSupplyCommitmentRootParams) error UpdateUTXOLease(ctx context.Context, arg UpdateUTXOLeaseParams) error UpsertAddr(ctx context.Context, arg UpsertAddrParams) (int64, error) UpsertAddrEvent(ctx context.Context, arg UpsertAddrEventParams) (int64, error) @@ -196,6 +215,9 @@ type Querier interface { UpsertMultiverseRoot(ctx context.Context, arg UpsertMultiverseRootParams) (int64, error) UpsertRootNode(ctx context.Context, arg UpsertRootNodeParams) error UpsertScriptKey(ctx context.Context, arg UpsertScriptKeyParams) (int64, error) + // Return the ID of the state that was actually set (either inserted or updated), + // and the latest commitment ID that was set. 
+ UpsertSupplyCommitStateMachine(ctx context.Context, arg UpsertSupplyCommitStateMachineParams) (UpsertSupplyCommitStateMachineRow, error) UpsertTapscriptTreeEdge(ctx context.Context, arg UpsertTapscriptTreeEdgeParams) (int64, error) UpsertTapscriptTreeNode(ctx context.Context, rawNode []byte) (int64, error) UpsertTapscriptTreeRootHash(ctx context.Context, arg UpsertTapscriptTreeRootHashParams) (int64, error) diff --git a/tapdb/sqlc/schemas/generated_schema.sql b/tapdb/sqlc/schemas/generated_schema.sql index d32692b2d..80b930b85 100644 --- a/tapdb/sqlc/schemas/generated_schema.sql +++ b/tapdb/sqlc/schemas/generated_schema.sql @@ -603,7 +603,7 @@ CREATE TABLE mint_anchor_uni_commitments ( group_key BLOB , taproot_internal_key_id BIGINT REFERENCES internal_keys(key_id) -NOT NULL); +NOT NULL, spent_by BIGINT REFERENCES supply_commitments(commit_id)); CREATE UNIQUE INDEX mint_anchor_uni_commitments_unique ON mint_anchor_uni_commitments (batch_id, tx_output_index); @@ -753,6 +753,113 @@ CREATE TABLE script_keys ( CREATE INDEX status_idx ON addr_events(status); +CREATE TABLE supply_commit_state_machines ( + -- The tweaked group key identifying the asset group's state machine. + group_key BLOB PRIMARY KEY CHECK(length(group_key) = 33), + + -- The current state of the state machine. + current_state_id INTEGER NOT NULL REFERENCES supply_commit_states(id), + + -- The latest successfully committed supply state on chain. + -- Can be NULL if no commitment has been made yet. + latest_commitment_id BIGINT REFERENCES supply_commitments(commit_id) +); + +CREATE TABLE supply_commit_states ( + id INTEGER PRIMARY KEY, + state_name TEXT UNIQUE NOT NULL +); + +CREATE TABLE supply_commit_transitions ( + transition_id INTEGER PRIMARY KEY, + + -- Reference back to the state machine this transition belongs to. + state_machine_group_key BLOB NOT NULL REFERENCES supply_commit_state_machines(group_key), + + -- The commitment being replaced by this transition. + -- Can be NULL if this is the first commitment. + old_commitment_id BIGINT REFERENCES supply_commitments(commit_id), + + -- The new commitment that this transition aims to create. + -- Can be NULL initially, before the commitment details are created. + new_commitment_id BIGINT REFERENCES supply_commitments(commit_id), + + -- The chain transaction that, once confirmed, will finalize this transition. + -- Can be NULL until the transaction is created and signed. + pending_commit_txn_id BIGINT REFERENCES chain_txns(txn_id), + + -- Indicates if this transition has been successfully completed and committed. + finalized BOOLEAN NOT NULL DEFAULT FALSE, + + -- Timestamp when this transition was initiated (unix timestamp in seconds). + creation_time BIGINT NOT NULL +); + +CREATE UNIQUE INDEX supply_commit_transitions_single_pending_idx + ON supply_commit_transitions (state_machine_group_key) WHERE finalized = FALSE; + +CREATE INDEX supply_commit_transitions_state_machine_group_key_idx ON supply_commit_transitions(state_machine_group_key); + +CREATE TABLE supply_commit_update_types ( + id INTEGER PRIMARY KEY, + update_type_name TEXT UNIQUE NOT NULL +); + +CREATE TABLE supply_commitments ( + commit_id INTEGER PRIMARY KEY, + + -- The tweaked group key identifying the asset group this commitment belongs to. + group_key BLOB NOT NULL CHECK(length(group_key) = 33), + + -- The chain transaction that included this commitment. + chain_txn_id BIGINT NOT NULL REFERENCES chain_txns(txn_id), + + -- The output index within the chain_txn_id transaction for the commitment. 
+ output_index INTEGER, + + -- The internal key used for the commitment output. + internal_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + + -- The taproot output key used for the commitment output. + output_key BLOB NOT NULL CHECK(length(output_key) = 33), + + -- The block header of the block mining the commitment transaction. + block_header BLOB, + + -- The block height at which the commitment transaction was confirmed. + -- Can be NULL if the transaction is not yet confirmed. + block_height INTEGER, + + -- The merkle proof demonstrating the commitment's inclusion in the block. + merkle_proof BLOB, + + -- The root hash of the supply commitment at this snapshot. + supply_root_hash BLOB, + + -- The root sum of the supply commitment at this snapshot. + supply_root_sum BIGINT +); + +CREATE INDEX supply_commitments_chain_txn_id_idx ON supply_commitments(chain_txn_id); + +CREATE INDEX supply_commitments_group_key_idx ON supply_commitments(group_key); + +CREATE TABLE supply_update_events ( + event_id INTEGER PRIMARY KEY, + + -- Reference to the state transition this event is part of. + transition_id BIGINT NOT NULL REFERENCES supply_commit_transitions(transition_id) ON DELETE CASCADE, + + -- The type of update (mint, burn, ignore). + update_type_id INTEGER NOT NULL REFERENCES supply_commit_update_types(id), + + -- Opaque blob containing the serialized data for the specific + -- SupplyUpdateEvent (NewMintEvent, NewBurnEvent, NewIgnoreEvent). + event_data BLOB NOT NULL +); + +CREATE INDEX supply_update_events_transition_id_idx ON supply_update_events(transition_id); + CREATE TABLE tapscript_edges ( edge_id INTEGER PRIMARY KEY, diff --git a/tapdb/sqlc/supply_commit.sql.go b/tapdb/sqlc/supply_commit.sql.go new file mode 100644 index 000000000..c9d5b0170 --- /dev/null +++ b/tapdb/sqlc/supply_commit.sql.go @@ -0,0 +1,548 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.28.0 +// source: supply_commit.sql + +package sqlc + +import ( + "context" + "database/sql" +) + +const DeleteSupplyCommitTransition = `-- name: DeleteSupplyCommitTransition :exec +DELETE FROM supply_commit_transitions +WHERE transition_id = $1 +` + +func (q *Queries) DeleteSupplyCommitTransition(ctx context.Context, transitionID int64) error { + _, err := q.db.ExecContext(ctx, DeleteSupplyCommitTransition, transitionID) + return err +} + +const DeleteSupplyUpdateEvents = `-- name: DeleteSupplyUpdateEvents :exec +DELETE FROM supply_update_events +WHERE transition_id = $1 +` + +func (q *Queries) DeleteSupplyUpdateEvents(ctx context.Context, transitionID int64) error { + _, err := q.db.ExecContext(ctx, DeleteSupplyUpdateEvents, transitionID) + return err +} + +const FetchChainTxByID = `-- name: FetchChainTxByID :one +SELECT raw_tx, block_height -- Include block_height needed by FetchState +FROM chain_txns +WHERE txn_id = $1 +` + +type FetchChainTxByIDRow struct { + RawTx []byte + BlockHeight sql.NullInt32 +} + +func (q *Queries) FetchChainTxByID(ctx context.Context, txnID int64) (FetchChainTxByIDRow, error) { + row := q.db.QueryRowContext(ctx, FetchChainTxByID, txnID) + var i FetchChainTxByIDRow + err := row.Scan(&i.RawTx, &i.BlockHeight) + return i, err +} + +const FetchInternalKeyByID = `-- name: FetchInternalKeyByID :one +SELECT raw_key, key_family, key_index +FROM internal_keys +WHERE key_id = $1 +` + +type FetchInternalKeyByIDRow struct { + RawKey []byte + KeyFamily int32 + KeyIndex int32 +} + +func (q *Queries) FetchInternalKeyByID(ctx context.Context, keyID int64) (FetchInternalKeyByIDRow, error) { + row := q.db.QueryRowContext(ctx, FetchInternalKeyByID, keyID) + var i FetchInternalKeyByIDRow + err := row.Scan(&i.RawKey, &i.KeyFamily, &i.KeyIndex) + return i, err +} + +const FetchSupplyCommit = `-- name: FetchSupplyCommit :one +SELECT + sc.commit_id, + sc.output_index, + sc.output_key, + ik.raw_key AS internal_key, + txn.raw_tx, + sc.supply_root_hash AS root_hash, + sc.supply_root_sum AS root_sum +FROM supply_commit_state_machines sm +JOIN supply_commitments sc + ON sm.latest_commitment_id = sc.commit_id +JOIN chain_txns txn + ON sc.chain_txn_id = txn.txn_id +JOIN internal_keys ik + ON sc.internal_key_id = ik.key_id +WHERE + sm.group_key = $1 AND + txn.block_hash IS NOT NULL +` + +type FetchSupplyCommitRow struct { + CommitID int64 + OutputIndex sql.NullInt32 + OutputKey []byte + InternalKey []byte + RawTx []byte + RootHash []byte + RootSum sql.NullInt64 +} + +func (q *Queries) FetchSupplyCommit(ctx context.Context, groupKey []byte) (FetchSupplyCommitRow, error) { + row := q.db.QueryRowContext(ctx, FetchSupplyCommit, groupKey) + var i FetchSupplyCommitRow + err := row.Scan( + &i.CommitID, + &i.OutputIndex, + &i.OutputKey, + &i.InternalKey, + &i.RawTx, + &i.RootHash, + &i.RootSum, + ) + return i, err +} + +const FetchUnspentPrecommits = `-- name: FetchUnspentPrecommits :many +SELECT + mac.tx_output_index, + ik.raw_key AS taproot_internal_key, + mac.group_key, + mint_txn.block_height, + mint_txn.raw_tx +FROM mint_anchor_uni_commitments mac +JOIN asset_minting_batches amb ON mac.batch_id = amb.batch_id +JOIN genesis_points gp ON amb.genesis_id = gp.genesis_id +JOIN chain_txns mint_txn ON gp.anchor_tx_id = mint_txn.txn_id +JOIN internal_keys ik ON mac.taproot_internal_key_id = ik.key_id +LEFT JOIN supply_commitments sc ON mac.spent_by = sc.commit_id +LEFT JOIN chain_txns commit_txn ON sc.chain_txn_id = commit_txn.txn_id +WHERE + mac.group_key = $1 AND + 
(mac.spent_by IS NULL OR commit_txn.block_hash IS NULL) +` + +type FetchUnspentPrecommitsRow struct { + TxOutputIndex int32 + TaprootInternalKey []byte + GroupKey []byte + BlockHeight sql.NullInt32 + RawTx []byte +} + +func (q *Queries) FetchUnspentPrecommits(ctx context.Context, groupKey []byte) ([]FetchUnspentPrecommitsRow, error) { + rows, err := q.db.QueryContext(ctx, FetchUnspentPrecommits, groupKey) + if err != nil { + return nil, err + } + defer rows.Close() + var items []FetchUnspentPrecommitsRow + for rows.Next() { + var i FetchUnspentPrecommitsRow + if err := rows.Scan( + &i.TxOutputIndex, + &i.TaprootInternalKey, + &i.GroupKey, + &i.BlockHeight, + &i.RawTx, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const FinalizeSupplyCommitTransition = `-- name: FinalizeSupplyCommitTransition :exec +UPDATE supply_commit_transitions +SET finalized = TRUE +WHERE transition_id = $1 +` + +func (q *Queries) FinalizeSupplyCommitTransition(ctx context.Context, transitionID int64) error { + _, err := q.db.ExecContext(ctx, FinalizeSupplyCommitTransition, transitionID) + return err +} + +const InsertSupplyCommitTransition = `-- name: InsertSupplyCommitTransition :one +INSERT INTO supply_commit_transitions ( + state_machine_group_key, old_commitment_id, new_commitment_id, + pending_commit_txn_id, finalized, creation_time +) VALUES ( + $1, $2, $3, $4, $5, $6 +) RETURNING transition_id +` + +type InsertSupplyCommitTransitionParams struct { + StateMachineGroupKey []byte + OldCommitmentID sql.NullInt64 + NewCommitmentID sql.NullInt64 + PendingCommitTxnID sql.NullInt64 + Finalized bool + CreationTime int64 +} + +func (q *Queries) InsertSupplyCommitTransition(ctx context.Context, arg InsertSupplyCommitTransitionParams) (int64, error) { + row := q.db.QueryRowContext(ctx, InsertSupplyCommitTransition, + arg.StateMachineGroupKey, + arg.OldCommitmentID, + arg.NewCommitmentID, + arg.PendingCommitTxnID, + arg.Finalized, + arg.CreationTime, + ) + var transition_id int64 + err := row.Scan(&transition_id) + return transition_id, err +} + +const InsertSupplyCommitment = `-- name: InsertSupplyCommitment :one +INSERT INTO supply_commitments ( + group_key, chain_txn_id, + output_index, internal_key_id, output_key, -- Core fields + block_height, block_header, merkle_proof, -- Nullable chain details + supply_root_hash, supply_root_sum -- Nullable root details +) VALUES ( + $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 +) RETURNING commit_id +` + +type InsertSupplyCommitmentParams struct { + GroupKey []byte + ChainTxnID int64 + OutputIndex sql.NullInt32 + InternalKeyID int64 + OutputKey []byte + BlockHeight sql.NullInt32 + BlockHeader []byte + MerkleProof []byte + SupplyRootHash []byte + SupplyRootSum sql.NullInt64 +} + +func (q *Queries) InsertSupplyCommitment(ctx context.Context, arg InsertSupplyCommitmentParams) (int64, error) { + row := q.db.QueryRowContext(ctx, InsertSupplyCommitment, + arg.GroupKey, + arg.ChainTxnID, + arg.OutputIndex, + arg.InternalKeyID, + arg.OutputKey, + arg.BlockHeight, + arg.BlockHeader, + arg.MerkleProof, + arg.SupplyRootHash, + arg.SupplyRootSum, + ) + var commit_id int64 + err := row.Scan(&commit_id) + return commit_id, err +} + +const InsertSupplyUpdateEvent = `-- name: InsertSupplyUpdateEvent :exec +INSERT INTO supply_update_events ( + transition_id, update_type_id, event_data +) VALUES ( + $1, $2, $3 +) +` + +type 
InsertSupplyUpdateEventParams struct { + TransitionID int64 + UpdateTypeID int32 + EventData []byte +} + +func (q *Queries) InsertSupplyUpdateEvent(ctx context.Context, arg InsertSupplyUpdateEventParams) error { + _, err := q.db.ExecContext(ctx, InsertSupplyUpdateEvent, arg.TransitionID, arg.UpdateTypeID, arg.EventData) + return err +} + +const QueryExistingPendingTransition = `-- name: QueryExistingPendingTransition :one +SELECT transition_id +FROM supply_commit_transitions sct +WHERE sct.state_machine_group_key = $1 AND finalized = FALSE +LIMIT 1 +` + +// Find the ID of an existing non-finalized transition for the group key +func (q *Queries) QueryExistingPendingTransition(ctx context.Context, groupKey []byte) (int64, error) { + row := q.db.QueryRowContext(ctx, QueryExistingPendingTransition, groupKey) + var transition_id int64 + err := row.Scan(&transition_id) + return transition_id, err +} + +const QueryPendingSupplyCommitTransition = `-- name: QueryPendingSupplyCommitTransition :one +WITH target_machine AS ( + SELECT group_key + FROM supply_commit_state_machines + WHERE group_key = $1 +) +SELECT + t.transition_id, + t.state_machine_group_key, + t.old_commitment_id, + t.new_commitment_id, + t.pending_commit_txn_id, + t.finalized, + t.creation_time +FROM supply_commit_transitions t +JOIN target_machine tm + ON t.state_machine_group_key = tm.group_key +WHERE t.finalized = FALSE +ORDER BY t.creation_time DESC +LIMIT 1 +` + +func (q *Queries) QueryPendingSupplyCommitTransition(ctx context.Context, groupKey []byte) (SupplyCommitTransition, error) { + row := q.db.QueryRowContext(ctx, QueryPendingSupplyCommitTransition, groupKey) + var i SupplyCommitTransition + err := row.Scan( + &i.TransitionID, + &i.StateMachineGroupKey, + &i.OldCommitmentID, + &i.NewCommitmentID, + &i.PendingCommitTxnID, + &i.Finalized, + &i.CreationTime, + ) + return i, err +} + +const QuerySupplyCommitStateMachine = `-- name: QuerySupplyCommitStateMachine :one +SELECT + sm.group_key, + sm.current_state_id, + states.state_name, + sm.latest_commitment_id +FROM supply_commit_state_machines sm +JOIN supply_commit_states states + ON sm.current_state_id = states.id +WHERE sm.group_key = $1 +` + +type QuerySupplyCommitStateMachineRow struct { + GroupKey []byte + CurrentStateID int32 + StateName string + LatestCommitmentID sql.NullInt64 +} + +func (q *Queries) QuerySupplyCommitStateMachine(ctx context.Context, groupKey []byte) (QuerySupplyCommitStateMachineRow, error) { + row := q.db.QueryRowContext(ctx, QuerySupplyCommitStateMachine, groupKey) + var i QuerySupplyCommitStateMachineRow + err := row.Scan( + &i.GroupKey, + &i.CurrentStateID, + &i.StateName, + &i.LatestCommitmentID, + ) + return i, err +} + +const QuerySupplyCommitment = `-- name: QuerySupplyCommitment :one +SELECT commit_id, group_key, chain_txn_id, output_index, internal_key_id, output_key, block_header, block_height, merkle_proof, supply_root_hash, supply_root_sum +FROM supply_commitments +WHERE commit_id = $1 +` + +func (q *Queries) QuerySupplyCommitment(ctx context.Context, commitID int64) (SupplyCommitment, error) { + row := q.db.QueryRowContext(ctx, QuerySupplyCommitment, commitID) + var i SupplyCommitment + err := row.Scan( + &i.CommitID, + &i.GroupKey, + &i.ChainTxnID, + &i.OutputIndex, + &i.InternalKeyID, + &i.OutputKey, + &i.BlockHeader, + &i.BlockHeight, + &i.MerkleProof, + &i.SupplyRootHash, + &i.SupplyRootSum, + ) + return i, err +} + +const QuerySupplyUpdateEvents = `-- name: QuerySupplyUpdateEvents :many +SELECT + ue.event_id, + ue.transition_id, + 
ue.update_type_id, + types.update_type_name, + ue.event_data +FROM supply_update_events ue +JOIN supply_commit_update_types types + ON ue.update_type_id = types.id +WHERE ue.transition_id = $1 +ORDER BY ue.event_id ASC +` + +type QuerySupplyUpdateEventsRow struct { + EventID int64 + TransitionID int64 + UpdateTypeID int32 + UpdateTypeName string + EventData []byte +} + +func (q *Queries) QuerySupplyUpdateEvents(ctx context.Context, transitionID int64) ([]QuerySupplyUpdateEventsRow, error) { + rows, err := q.db.QueryContext(ctx, QuerySupplyUpdateEvents, transitionID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []QuerySupplyUpdateEventsRow + for rows.Next() { + var i QuerySupplyUpdateEventsRow + if err := rows.Scan( + &i.EventID, + &i.TransitionID, + &i.UpdateTypeID, + &i.UpdateTypeName, + &i.EventData, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const UpdateSupplyCommitTransitionCommitment = `-- name: UpdateSupplyCommitTransitionCommitment :exec +UPDATE supply_commit_transitions +SET new_commitment_id = $1, + pending_commit_txn_id = $2 +WHERE transition_id = $3 +` + +type UpdateSupplyCommitTransitionCommitmentParams struct { + NewCommitmentID sql.NullInt64 + PendingCommitTxnID sql.NullInt64 + TransitionID int64 +} + +func (q *Queries) UpdateSupplyCommitTransitionCommitment(ctx context.Context, arg UpdateSupplyCommitTransitionCommitmentParams) error { + _, err := q.db.ExecContext(ctx, UpdateSupplyCommitTransitionCommitment, arg.NewCommitmentID, arg.PendingCommitTxnID, arg.TransitionID) + return err +} + +const UpdateSupplyCommitmentChainDetails = `-- name: UpdateSupplyCommitmentChainDetails :exec +UPDATE supply_commitments +SET merkle_proof = $1, + output_index = $2, + block_header = $3, + chain_txn_id = $4, + block_height = $5 +WHERE commit_id = $6 +` + +type UpdateSupplyCommitmentChainDetailsParams struct { + MerkleProof []byte + OutputIndex sql.NullInt32 + BlockHeader []byte + ChainTxnID int64 + BlockHeight sql.NullInt32 + CommitID int64 +} + +func (q *Queries) UpdateSupplyCommitmentChainDetails(ctx context.Context, arg UpdateSupplyCommitmentChainDetailsParams) error { + _, err := q.db.ExecContext(ctx, UpdateSupplyCommitmentChainDetails, + arg.MerkleProof, + arg.OutputIndex, + arg.BlockHeader, + arg.ChainTxnID, + arg.BlockHeight, + arg.CommitID, + ) + return err +} + +const UpdateSupplyCommitmentRoot = `-- name: UpdateSupplyCommitmentRoot :exec +UPDATE supply_commitments +SET supply_root_hash = $1, + supply_root_sum = $2 +WHERE commit_id = $3 +` + +type UpdateSupplyCommitmentRootParams struct { + SupplyRootHash []byte + SupplyRootSum sql.NullInt64 + CommitID int64 +} + +func (q *Queries) UpdateSupplyCommitmentRoot(ctx context.Context, arg UpdateSupplyCommitmentRootParams) error { + _, err := q.db.ExecContext(ctx, UpdateSupplyCommitmentRoot, arg.SupplyRootHash, arg.SupplyRootSum, arg.CommitID) + return err +} + +const UpsertSupplyCommitStateMachine = `-- name: UpsertSupplyCommitStateMachine :one +WITH target_state AS ( + -- Select the ID for the provided state name, if it exists. + SELECT id + FROM supply_commit_states s1 + WHERE s1.state_name = $3 +), default_state AS ( + -- Select the ID for the 'DefaultState'. 
+ SELECT id + FROM supply_commit_states s2 + WHERE s2.state_name = 'DefaultState' +) +INSERT INTO supply_commit_state_machines ( + group_key, current_state_id, latest_commitment_id +) VALUES ( + $1, + -- Use the target state ID if found, otherwise use the default state ID. + coalesce((SELECT id FROM target_state), (SELECT id FROM default_state)), + $2 +) +ON CONFLICT (group_key) +DO UPDATE SET + -- Update state ID only if a target state ID was found, otherwise keep existing. + current_state_id = coalesce((SELECT id FROM target_state), supply_commit_state_machines.current_state_id), + latest_commitment_id = coalesce($2, supply_commit_state_machines.latest_commitment_id) +RETURNING current_state_id, latest_commitment_id +` + +type UpsertSupplyCommitStateMachineParams struct { + GroupKey []byte + LatestCommitmentID sql.NullInt64 + StateName sql.NullString +} + +type UpsertSupplyCommitStateMachineRow struct { + CurrentStateID int32 + LatestCommitmentID sql.NullInt64 +} + +// Return the ID of the state that was actually set (either inserted or updated), +// and the latest commitment ID that was set. +func (q *Queries) UpsertSupplyCommitStateMachine(ctx context.Context, arg UpsertSupplyCommitStateMachineParams) (UpsertSupplyCommitStateMachineRow, error) { + row := q.db.QueryRowContext(ctx, UpsertSupplyCommitStateMachine, arg.GroupKey, arg.LatestCommitmentID, arg.StateName) + var i UpsertSupplyCommitStateMachineRow + err := row.Scan(&i.CurrentStateID, &i.LatestCommitmentID) + return i, err +} From d26d6607248e46dec2f80a916d5f2fd6649499bc Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 2 May 2025 20:49:52 -0700 Subject: [PATCH 6/8] tapdb: add concrete implementation of supplycommit.CommitmentTracker and supplycommit.StateMachineStore In this commit, we add concrete implementation of both CommitmentTracker and StateMachineStore. This implements the persistence layer for the supply commit state machine. The main method to understand is ApplyStateTransition, as it implements the atomic update of all the various components (tree, state transition, etc) on disk. --- tapdb/supply_commit.go | 1273 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1273 insertions(+) create mode 100644 tapdb/supply_commit.go diff --git a/tapdb/supply_commit.go b/tapdb/supply_commit.go new file mode 100644 index 000000000..13a56683c --- /dev/null +++ b/tapdb/supply_commit.go @@ -0,0 +1,1273 @@ +package tapdb + +import ( + "bytes" + "context" + "database/sql" + "errors" + "fmt" + "io" + "time" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/wire" + "github.com/lightninglabs/taproot-assets/asset" + "github.com/lightninglabs/taproot-assets/mssmt" + "github.com/lightninglabs/taproot-assets/proof" + "github.com/lightninglabs/taproot-assets/tapdb/sqlc" + "github.com/lightninglabs/taproot-assets/universe/supplycommit" + lfn "github.com/lightningnetwork/lnd/fn/v2" + "github.com/lightningnetwork/lnd/lnutils" +) + +// commitmentChainInfo holds optional chain confirmation details for a +// commitment. +type commitmentChainInfo struct { + BlockHeader *wire.BlockHeader + MerkleProof *proof.TxMerkleProof + BlockHeight uint32 +} + +type ( + // UnspentPrecommits is an alias for the sqlc type representing an + // unspent pre-commitment row. + UnspentPrecommits = sqlc.FetchUnspentPrecommitsRow + + // SupplyCommit is an alias for the sqlc type. + SupplyCommit = sqlc.FetchSupplyCommitRow + + // QuerySupplyStateMachineResp is an alias for the sqlc type + // representing a state machine row. 
+ QuerySupplyStateMachineResp = sqlc.QuerySupplyCommitStateMachineRow + + // QuerySupplyUpdateResp is an alias for the sqlc type representing + // supply update event rows. + QuerySupplyUpdateResp = sqlc.QuerySupplyUpdateEventsRow + + // SupplyCommitment is an alias for the sqlc type representing a supply + // commitment. + SupplyCommitment = sqlc.SupplyCommitment + + // ChainTxn is an alias for the sqlc type representing a chain + // transaction. + ChainTxn = sqlc.ChainTxn + + // SupplyCommitTransition is an alias for the sqlc type representing a + // supply + // commit transition. + SupplyCommitTransition = sqlc.SupplyCommitTransition + + // SupplyCommitMachineParams is an alias for the sqlc type. + SupplyCommitMachineParams = sqlc.UpsertSupplyCommitStateMachineParams + + // InsertSupplyCommitTransition is an alias for the sqlc type. + InsertSupplyCommitTransition = sqlc.InsertSupplyCommitTransitionParams + + // InsertSupplyUpdateEvent is an alias for the sqlc type. + InsertSupplyUpdateEvent = sqlc.InsertSupplyUpdateEventParams + + // UpsertChainTxParams is an alias for the sqlc type. + UpsertChainTxParams = sqlc.UpsertChainTxParams + + // SupplyCommitChainDetails is an alias for the sqlc type. + SupplyCommitChainDetails = sqlc.UpdateSupplyCommitmentChainDetailsParams + + // FetchInternalKeyByIDRow is an alias for the sqlc type. + FetchInternalKeyByIDRow = sqlc.FetchInternalKeyByIDRow + + // FetchChainTxByIDRow is an alias for the sqlc type. + FetchChainTxByIDRow = sqlc.FetchChainTxByIDRow + + // FetchUniverseSupplyRootRow is an alias for the sqlc type. + FetchUniverseSupplyRootRow = sqlc.FetchUniverseSupplyRootRow + + // UpdateSupplyCommitTransitionCommitmentParams is an alias for the + // sqlc type. + //nolint:lll + UpdateSupplyCommitTransitionCommitmentParams = sqlc.UpdateSupplyCommitTransitionCommitmentParams + + // UpdateSupplyCommitmentRootParams is an alias for the sqlc type. + UpdateSupplyCommitmentRootParams = sqlc.UpdateSupplyCommitmentRootParams +) + +// SupplyCommitStore is the interface that provides the database methods needed +// to implement the supplycommit.CommitmentTracker and +// supplycommit.StateMachineStore interfaces. +type SupplyCommitStore interface { + TreeStore + BaseUniverseStore + + // FetchUnspentPrecommits fetches all unspent pre-commitments for a + // given group key. + FetchUnspentPrecommits(ctx context.Context, + groupKey []byte) ([]UnspentPrecommits, error) + + // FetchSupplyCommit fetches the latest confirmed supply commitment for + // a given group key. + FetchSupplyCommit(ctx context.Context, + groupKey []byte) (SupplyCommit, error) + + // UpsertSupplyCommitStateMachine upserts the state machine entry and + // returns the resulting state ID and latest commitment ID. + UpsertSupplyCommitStateMachine( + ctx context.Context, arg SupplyCommitMachineParams, + ) (sqlc.UpsertSupplyCommitStateMachineRow, error) + + // QueryPendingSupplyCommitTransition fetches the latest non-finalized + // transition for a group key. + QueryPendingSupplyCommitTransition(ctx context.Context, + groupKey []byte) (SupplyCommitTransition, error) + + // InsertSupplyCommitTransition inserts a new transition record. + InsertSupplyCommitTransition(ctx context.Context, + arg InsertSupplyCommitTransition) (int64, error) + + // InsertSupplyUpdateEvent inserts a new supply update event associated + // with a transition. + InsertSupplyUpdateEvent(ctx context.Context, + arg InsertSupplyUpdateEvent) error + + // UpsertChainTx upserts a chain transaction. 
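+	// The returned ID is the chain_txns primary key that the supply
+	// commitment and transition tables reference.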
+ UpsertChainTx( + ctx context.Context, arg UpsertChainTxParams, + ) (int64, error) + + // UpdateSupplyCommitTransitionCommitment updates the pending commit tx + // ID for a + // transition and the new commitment ID. + UpdateSupplyCommitTransitionCommitment(ctx context.Context, + arg UpdateSupplyCommitTransitionCommitmentParams) error + + // InsertSupplyCommitment inserts a new supply commitment record. + InsertSupplyCommitment(ctx context.Context, + arg sqlc.InsertSupplyCommitmentParams) (int64, error) + + // QuerySupplyCommitStateMachine fetches the state machine details. + QuerySupplyCommitStateMachine(ctx context.Context, + groupKey []byte) (QuerySupplyStateMachineResp, error) + + // QuerySupplyUpdateEvents fetches all update events for a transition. + QuerySupplyUpdateEvents(ctx context.Context, + transitionID int64) ([]QuerySupplyUpdateResp, error) + + // QuerySupplyCommitment fetches a specific supply commitment by ID. + QuerySupplyCommitment(ctx context.Context, + commitID int64) (sqlc.SupplyCommitment, error) + + // FetchChainTx fetches a chain transaction by its TXID. + FetchChainTx(ctx context.Context, txid []byte) (ChainTxn, error) + + // UpdateSupplyCommitmentChainDetails updates the chain-specific details + // of a supply commitment after confirmation. + UpdateSupplyCommitmentChainDetails(ctx context.Context, + arg SupplyCommitChainDetails) error + + // UpdateSupplyCommitmentRoot updates the SMT root hash and sum for a + // given supply commitment. + UpdateSupplyCommitmentRoot(ctx context.Context, + arg UpdateSupplyCommitmentRootParams) error + + // FinalizeSupplyCommitTransition marks a transition as finalized. + FinalizeSupplyCommitTransition(ctx context.Context, + transitionID int64) error + + // QueryExistingPendingTransition fetches the ID of an existing + // non-finalized transition for a group key. Returns sql.ErrNoRows if + // none exists. + QueryExistingPendingTransition(ctx context.Context, + groupKey []byte) (int64, error) + + // FetchInternalKeyByID fetches an internal key by its primary key ID. + FetchInternalKeyByID(ctx context.Context, + keyID int64) (FetchInternalKeyByIDRow, error) + + // FetchChainTxByID fetches a chain transaction by its primary key ID. + FetchChainTxByID(ctx context.Context, + txnID int64) (FetchChainTxByIDRow, error) + + // FetchUniverseSupplyRoot fetches the root hash and sum for a supply + // tree namespace. + FetchUniverseSupplyRoot(ctx context.Context, + namespaceRoot string) (FetchUniverseSupplyRootRow, error) +} + +// BatchedSupplyCommitStore is a wrapper around the base SupplyCommitStore that +// allows us to perform batch queries within a single transaction. +type BatchedSupplyCommitStore interface { + SupplyCommitStore + + // We embed the BatchedTx interface for BaseUniverseStore as it includes + // TreeStore methods needed by SupplyCommitStore. + BatchedTx[SupplyCommitStore] +} + +// SupplyCommitMachine implements the supplycommit.CommitmentTracker and +// supplycommit.StateMachineStore interfaces using the database queries +// defined in SupplyCommitStore. +type SupplyCommitMachine struct { + db BatchedSupplyCommitStore +} + +// NewSupplyCommitMachine creates a new SupplyCommitMachine instance. +func NewSupplyCommitMachine(db BatchedSupplyCommitStore) *SupplyCommitMachine { + return &SupplyCommitMachine{ + db: db, + } +} + +// UnspentPrecommits returns the set of unspent pre-commitments for a given +// asset spec. The asset spec will only specify a group key, and not also an +// asset ID. 
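+// A pre-commitment counts as unspent if its spent_by reference is NULL, or if
+// the supply commitment that spends it has not yet confirmed on chain.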
+func (s *SupplyCommitMachine) UnspentPrecommits(ctx context.Context, + assetSpec asset.Specifier) lfn.Result[supplycommit.PreCommits] { + + groupKey := assetSpec.UnwrapGroupKeyToPtr() + if groupKey == nil { + return lfn.Err[supplycommit.PreCommits](ErrMissingGroupKey) + } + groupKeyBytes := groupKey.SerializeCompressed() + + var preCommits supplycommit.PreCommits + readTx := ReadTxOption() + dbErr := s.db.ExecTx(ctx, readTx, func(db SupplyCommitStore) error { + rows, err := db.FetchUnspentPrecommits(ctx, groupKeyBytes) + if err != nil { + // It's okay if there are no unspent pre-commits. + if errors.Is(err, sql.ErrNoRows) { + return nil + } + return fmt.Errorf("error fetching unspent "+ + "precommits: %w", err) + } + + // For each pre-commitment, parse the internal key and group + // key, and assemble the final struct as needed by the + // interface. + preCommits = make(supplycommit.PreCommits, 0, len(rows)) + for _, row := range rows { + internalKey, err := btcec.ParsePubKey( + row.TaprootInternalKey, + ) + if err != nil { + return fmt.Errorf("error parsing internal "+ + "key: %w", err) + } + + groupPubKey, err := btcec.ParsePubKey(row.GroupKey) + if err != nil { + return fmt.Errorf("error parsing group key: %w", + err) + } + + var mintingTx wire.MsgTx + err = mintingTx.Deserialize(bytes.NewReader(row.RawTx)) + if err != nil { + return fmt.Errorf("error deserializing "+ + "minting tx: %w", err) + } + + preCommit := supplycommit.PreCommitment{ + BlockHeight: uint32( + row.BlockHeight.Int32, + ), + MintingTxn: &mintingTx, + OutIdx: uint32(row.TxOutputIndex), + InternalKey: *internalKey, + GroupPubKey: *groupPubKey, + } + preCommits = append(preCommits, preCommit) + } + + return nil + }) + if dbErr != nil { + return lfn.Err[supplycommit.PreCommits](dbErr) + } + + return lfn.Ok(preCommits) +} + +// SupplyCommit returns the root commitment for a given asset spec. From the PoV +// of the chain, this is a singleton instance. +func (s *SupplyCommitMachine) SupplyCommit(ctx context.Context, + assetSpec asset.Specifier) supplycommit.RootCommitResp { + + groupKey := assetSpec.UnwrapGroupKeyToPtr() + if groupKey == nil { + return lfn.Err[lfn.Option[supplycommit.RootCommitment]]( + ErrMissingGroupKey, + ) + } + groupKeyBytes := groupKey.SerializeCompressed() + + var rootCommitmentOpt lfn.Option[supplycommit.RootCommitment] + + readTx := ReadTxOption() + dbErr := s.db.ExecTx(ctx, readTx, func(db SupplyCommitStore) error { + row, err := db.FetchSupplyCommit(ctx, groupKeyBytes) + if err != nil { + // If no commitment is found, return None. + if errors.Is(err, sql.ErrNoRows) { + return nil + } + + return fmt.Errorf("error fetching supply commit: %w", + err) + } + + internalKey, err := btcec.ParsePubKey(row.InternalKey) + if err != nil { + return fmt.Errorf("error parsing internal key: %w", err) + } + + outputKey, err := btcec.ParsePubKey(row.OutputKey) + if err != nil { + return fmt.Errorf("error parsing output key: %w", err) + } + + var commitTx wire.MsgTx + err = commitTx.Deserialize(bytes.NewReader(row.RawTx)) + if err != nil { + return fmt.Errorf("error deserializing commit tx: %w", + err) + } + + // Construct the root node directly from the stored hash and + // sum. Handle potential NULL values if the root wasn't set yet + // (though FetchSupplyCommit filters for confirmed TX, so it + // should be set). 
+ var ( + rootHash mssmt.NodeHash + rootSum uint64 + rootNode *mssmt.BranchNode + ) + if len(row.RootHash) != 0 && row.RootSum.Valid { + copy(rootHash[:], row.RootHash) + rootSum = uint64(row.RootSum.Int64) + rootNode = mssmt.NewComputedBranch(rootHash, rootSum) + } else { + // Should not happen due to query filter, but handle + // defensively. + log.Warnf("SupplyCommit: Fetched confirmed commit %d "+ + "but root hash/sum is NULL", row.CommitID) + + rootNode = mssmt.NewComputedBranch( + mssmt.EmptyTreeRootHash, 0, + ) + } + + rootCommitment := supplycommit.RootCommitment{ + Txn: &commitTx, + TxOutIdx: uint32(row.OutputIndex.Int32), + InternalKey: internalKey, + OutputKey: outputKey, + SupplyRoot: rootNode, + } + rootCommitmentOpt = lfn.Some(rootCommitment) + + return nil + }) + if dbErr != nil { + return lfn.Err[lfn.Option[supplycommit.RootCommitment]](dbErr) + } + + return lfn.Ok(rootCommitmentOpt) +} + +// stateToDBString maps a supplycommit.State interface to its database string +// representation. +func stateToDBString(state supplycommit.State) (string, error) { + switch state.(type) { + case *supplycommit.DefaultState: + return "DefaultState", nil + case *supplycommit.UpdatesPendingState: + return "UpdatesPendingState", nil + case *supplycommit.CommitTreeCreateState: + return "CommitTreeCreateState", nil + case *supplycommit.CommitTxCreateState: + return "CommitTxCreateState", nil + case *supplycommit.CommitTxSignState: + return "CommitTxSignState", nil + case *supplycommit.CommitBroadcastState: + return "CommitBroadcastState", nil + case *supplycommit.CommitFinalizeState: + return "CommitFinalizeState", nil + default: + return "", fmt.Errorf("unknown state type: %T", state) + } +} + +// stateToInt maps a supplycommit.State to its integer ID used in the DB. +func stateToInt(state supplycommit.State) (int32, error) { + switch state.(type) { + case *supplycommit.DefaultState: + return 0, nil + case *supplycommit.UpdatesPendingState: + return 1, nil + case *supplycommit.CommitTreeCreateState: + return 2, nil + case *supplycommit.CommitTxCreateState: + return 3, nil + case *supplycommit.CommitTxSignState: + return 4, nil + case *supplycommit.CommitBroadcastState: + return 5, nil + case *supplycommit.CommitFinalizeState: + return 6, nil + default: + return -1, fmt.Errorf("unknown state type: %T", state) + } +} + +// intToState maps an integer state ID from the DB to a supplycommit.State. +func intToState(stateID int32) (supplycommit.State, error) { + switch stateID { + case 0: + return &supplycommit.DefaultState{}, nil + case 1: + return &supplycommit.UpdatesPendingState{}, nil + case 2: + return &supplycommit.CommitTreeCreateState{}, nil + case 3: + return &supplycommit.CommitTxCreateState{}, nil + case 4: + return &supplycommit.CommitTxSignState{}, nil + case 5: + return &supplycommit.CommitBroadcastState{}, nil + case 6: + return &supplycommit.CommitFinalizeState{}, nil + default: + return nil, fmt.Errorf("unknown state ID: %d", stateID) + } +} + +// updateTypeToInt maps a supplycommit.SupplySubTree to its integer ID. +func updateTypeToInt(treeType supplycommit.SupplySubTree) (int32, error) { + switch treeType { + case supplycommit.MintTreeType: + return 0, nil + case supplycommit.BurnTreeType: + return 1, nil + case supplycommit.IgnoreTreeType: + return 2, nil + default: + return -1, fmt.Errorf("unknown tree type: %v", treeType) + } +} + +// serializeSupplyUpdateEvent encodes a SupplyUpdateEvent into bytes. 
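+//
+// A minimal round-trip sketch (illustrative; event is assumed to be a
+// *supplycommit.NewMintEvent, and "mint" mirrors the update_type_name stored
+// for such events):
+//
+//	var buf bytes.Buffer
+//	if err := serializeSupplyUpdateEvent(&buf, event); err != nil {
+//		return err
+//	}
+//	decoded, err := deserializeSupplyUpdateEvent("mint", &buf)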
+func serializeSupplyUpdateEvent(w io.Writer, + event supplycommit.SupplyUpdateEvent) error { + + switch e := event.(type) { + case *supplycommit.NewMintEvent: + return e.Encode(w) + + case *supplycommit.NewBurnEvent: + return e.Encode(w) + + case *supplycommit.NewIgnoreEvent: + return e.Encode(w) + + default: + return fmt.Errorf("unknown event type: %T", event) + } +} + +// deserializeSupplyUpdateEvent decodes bytes into a SupplyUpdateEvent. +func deserializeSupplyUpdateEvent(typeName string, + r io.Reader) (supplycommit.SupplyUpdateEvent, error) { + + switch typeName { + case "mint": + mint := new(supplycommit.NewMintEvent) + if err := mint.Decode(r); err != nil { + return nil, fmt.Errorf("failed to decode mint "+ + "event: %w", err) + } + + return mint, nil + case "burn": + burn := new(supplycommit.NewBurnEvent) + if err := burn.Decode(r); err != nil { + return nil, fmt.Errorf("failed to decode burn "+ + "event: %w", err) + } + + return burn, nil + case "ignore": + ignore := new(supplycommit.NewIgnoreEvent) + if err := ignore.Decode(r); err != nil { + return nil, fmt.Errorf("failed to decode ignore "+ + "event: %w", err) + } + + return ignore, nil + + default: + return nil, fmt.Errorf("unknown update type name: %s", typeName) + } +} + +// InsertPendingUpdate attempts to insert a new pending update into the +// update log of the target supply commit state machine. +func (s *SupplyCommitMachine) InsertPendingUpdate(ctx context.Context, + assetSpec asset.Specifier, event supplycommit.SupplyUpdateEvent) error { + + groupKey := assetSpec.UnwrapGroupKeyToPtr() + if groupKey == nil { + return ErrMissingGroupKey + } + groupKeyBytes := groupKey.SerializeCompressed() + + writeTx := WriteTxOption() + return s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error { + // First, we'll upsert a new state machine. We pass a null state + // name, as it'll be made with default if doesn't exist. The + // query returns the actual state ID set and the latest + // commitment ID. + upsertResult, err := db.UpsertSupplyCommitStateMachine( + ctx, SupplyCommitMachineParams{ + GroupKey: groupKeyBytes, + StateName: sql.NullString{}, + }, + ) + if err != nil { + return fmt.Errorf("failed initial upsert for "+ + "state machine: %w", err) + } + currentStateID := upsertResult.CurrentStateID + + // Make sure that we're in either the DefaultState or + // UpdatesPendingState state as a sanity check. + currentState, err := intToState(currentStateID) + if err != nil { + // This indicates an unexpected state ID returned + // from the DB. + return fmt.Errorf("invalid state ID %d "+ + "returned from upsert: %w", currentStateID, err) + } + currentStateName := currentState.String() + if currentStateName != "DefaultState" && + currentStateName != "UpdatesPendingState" { + + return fmt.Errorf("cannot insert pending "+ + "update in state: %s", currentStateName) + } + + // Now that we know the state machine is in the proper state, + // we'll fetch the transition ID, which will be needed below. + var transitionID int64 + existingTransitionID, err := db.QueryExistingPendingTransition( + ctx, groupKeyBytes, + ) + + switch { + // If no existing pending transition, create one. + case errors.Is(err, sql.ErrNoRows): + // Use the latest commitment ID returned by the upsert. 
+ latestCommitID := upsertResult.LatestCommitmentID + + transitionID, err = db.InsertSupplyCommitTransition( + ctx, InsertSupplyCommitTransition{ + StateMachineGroupKey: groupKeyBytes, + OldCommitmentID: latestCommitID, + Finalized: false, + CreationTime: time.Now().Unix(), + }, + ) + if err != nil { + return fmt.Errorf("failed to insert "+ + "new transition: %w", err) + } + + // If the query failed all together, then we'll bail now. + case err != nil: + return fmt.Errorf("failed to query existing "+ + "pending transition: %w", err) + + // Otherwise, we found an existing pending transition, so we can + // just use that transition ID. + default: + transitionID = existingTransitionID + } + + // With the transition created or found, we can now serialize, + // then insert the update event. + var b bytes.Buffer + err = serializeSupplyUpdateEvent(&b, event) + if err != nil { + return fmt.Errorf("failed to serialize event "+ + "data: %w", err) + } + updateTypeID, err := updateTypeToInt(event.SupplySubTreeType()) + if err != nil { + return fmt.Errorf("failed to map update type: %w", err) + } + err = db.InsertSupplyUpdateEvent( + ctx, InsertSupplyUpdateEvent{ + TransitionID: transitionID, + UpdateTypeID: updateTypeID, + EventData: b.Bytes(), + }, + ) + if err != nil { + return fmt.Errorf("failed to insert update "+ + "event: %w", err) + } + + // Finally, we'll explicitly set the state machine to the + // UpdatesPendingState. + updatesPendingStateName, err := stateToDBString( + &supplycommit.UpdatesPendingState{}, + ) + if err != nil { + return fmt.Errorf("error getting pending "+ + "state name: %w", err) + } + // We only update the state name here, leaving the commitment ID + // as is (by passing NULL). + _, err = db.UpsertSupplyCommitStateMachine( + ctx, SupplyCommitMachineParams{ //nolint:gocritic + GroupKey: groupKeyBytes, + StateName: sqlStr(updatesPendingStateName), + }, + ) + if err != nil { + return fmt.Errorf("failed to update state "+ + "machine to pending: %w", err) + } + + return nil + }) +} + +// InsertSignedCommitTx associates a new signed commitment anchor transaction +// with the current active supply commitment state transition. +func (s *SupplyCommitMachine) InsertSignedCommitTx(ctx context.Context, + assetSpec asset.Specifier, commitDetails supplycommit.SupplyCommitTxn, +) error { + + groupKey := assetSpec.UnwrapGroupKeyToPtr() + if groupKey == nil { + return ErrMissingGroupKey + } + groupKeyBytes := groupKey.SerializeCompressed() + + commitTx := commitDetails.Txn + internalKey := commitDetails.InternalKey + outputKey := commitDetails.OutputKey + outputIndex := commitDetails.OutputIndex + + writeTx := WriteTxOption() + return s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error { + // First, we'll locate the current pending transition for the + // state machine. + pendingTransition, err := db.QueryPendingSupplyCommitTransition( + ctx, groupKeyBytes, + ) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return fmt.Errorf("no pending transition "+ + "found for group key %x", + groupKeyBytes) + } + + return fmt.Errorf("failed to query pending "+ + "transition: %w", err) + } + + // Next, we'll upsert the chain transaction on disk. The block + // related fields are nil as this hasn't been confirmed yet. 
+		var txBytes bytes.Buffer
+		if err := commitTx.Serialize(&txBytes); err != nil {
+			return fmt.Errorf("failed to serialize commit "+
+				"tx: %w", err)
+		}
+		txid := commitTx.TxHash()
+		chainTxID, err := db.UpsertChainTx(ctx, UpsertChainTxParams{
+			Txid:  txid[:],
+			RawTx: txBytes.Bytes(),
+		})
+		if err != nil {
+			return fmt.Errorf("failed to upsert commit chain tx: "+
+				"%w", err)
+		}
+
+		// Upsert the internal key to get its ID. We assume key family
+		// and index 0 for now, as this key is likely derived
+		// externally.
+		internalKeyID, err := db.UpsertInternalKey(ctx, InternalKey{
+			RawKey: internalKey.SerializeCompressed(),
+		})
+		if err != nil {
+			return fmt.Errorf("failed to upsert internal key %x: "+
+				"%w",
+				internalKey.SerializeCompressed(), err)
+		}
+
+		// Insert the new commitment record. Chain details (block
+		// height, header, proof, output index) are NULL at this stage.
+		//nolint:lll
+		newCommitmentID, err := db.InsertSupplyCommitment(ctx, sqlc.InsertSupplyCommitmentParams{
+			GroupKey:       groupKeyBytes,
+			ChainTxnID:     chainTxID,
+			InternalKeyID:  internalKeyID,
+			OutputKey:      outputKey.SerializeCompressed(),
+			SupplyRootHash: nil,
+			SupplyRootSum:  sql.NullInt64{},
+			OutputIndex:    sqlInt32(outputIndex),
+		})
+		if err != nil {
+			return fmt.Errorf("failed to insert new supply "+
+				"commitment: %w", err)
+		}
+
+		// Update the transition record to link to the new commitment ID
+		// and the pending chain transaction ID in a single query.
+		err = db.UpdateSupplyCommitTransitionCommitment(
+			ctx, UpdateSupplyCommitTransitionCommitmentParams{
+				NewCommitmentID:    sqlInt64(newCommitmentID),
+				PendingCommitTxnID: sqlInt64(chainTxID),
+				TransitionID:       pendingTransition.TransitionID, //nolint:lll
+			},
+		)
+		if err != nil {
+			return fmt.Errorf("failed to update transition "+
+				"commitment: %w", err)
+		}
+
+		// As the final step, we'll now update the state on disk to
+		// move to the broadcast state. This ensures that on restart
+		// we'll broadcast the commit txn we just signed.
+		broadcastStateName, err := stateToDBString(
+			&supplycommit.CommitBroadcastState{},
+		)
+		if err != nil {
+			return fmt.Errorf("error getting broadcast state "+
+				"name: %w", err)
+		}
+		// We only update the state name here, leaving the commitment ID
+		// as is (by passing NULL).
+		_, err = db.UpsertSupplyCommitStateMachine(
+			ctx, SupplyCommitMachineParams{
+				GroupKey:  groupKeyBytes,
+				StateName: sqlStr(broadcastStateName),
+			},
+		)
+		if err != nil {
+			return fmt.Errorf("failed to update state machine "+
+				"state: %w", err)
+		}
+
+		return nil
+	})
+}
+
+// CommitState commits the state of the state machine to disk.
+func (s *SupplyCommitMachine) CommitState(ctx context.Context,
+	assetSpec asset.Specifier, state supplycommit.State) error {
+
+	groupKey := assetSpec.UnwrapGroupKeyToPtr()
+	if groupKey == nil {
+		return ErrMissingGroupKey
+	}
+	groupKeyBytes := groupKey.SerializeCompressed()
+
+	newStateName, err := stateToDBString(state)
+	if err != nil {
+		return fmt.Errorf("failed to map state to string: %w", err)
+	}
+
+	writeTx := WriteTxOption()
+	return s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error {
+		// We only update the state name here, leaving the commitment ID
+		// as is (by passing NULL).
+ _, err = db.UpsertSupplyCommitStateMachine( + ctx, SupplyCommitMachineParams{ + GroupKey: groupKeyBytes, + StateName: sqlStr(newStateName), + }, + ) + if err != nil { + return fmt.Errorf("failed to update state machine "+ + "state: %w", err) + } + return nil + }) +} + +// fetchCommitment is a helper to fetch and reconstruct a RootCommitment and +// its associated chain confirmation details. +func fetchCommitment(ctx context.Context, db SupplyCommitStore, + commitID sql.NullInt64, groupKeyBytes []byte, +) (lfn.Option[supplycommit.RootCommitment], + lfn.Option[commitmentChainInfo], error) { + + noneRootCommit := lfn.None[supplycommit.RootCommitment]() + noneChainInfo := lfn.None[commitmentChainInfo]() + + if !commitID.Valid { + return noneRootCommit, noneChainInfo, nil + } + + // First, fetch the supply commitment itself. + commit, err := db.QuerySupplyCommitment(ctx, commitID.Int64) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return noneRootCommit, noneChainInfo, nil + } + return noneRootCommit, noneChainInfo, fmt.Errorf("failed to "+ + "query commitment %d: %w", commitID.Int64, err) + } + + internalKeyRow, err := db.FetchInternalKeyByID( + ctx, commit.InternalKeyID, + ) + if err != nil { + return noneRootCommit, noneChainInfo, fmt.Errorf("failed to "+ + "fetch internal key %d for commit %d: %w", + commit.InternalKeyID, commitID.Int64, err) + } + internalKey, err := btcec.ParsePubKey(internalKeyRow.RawKey) + if err != nil { + return noneRootCommit, noneChainInfo, fmt.Errorf("failed to "+ + "parse internal key for commit %d: %w", commitID.Int64, + err) + } + outputKey, err := btcec.ParsePubKey(commit.OutputKey) + if err != nil { + return noneRootCommit, noneChainInfo, fmt.Errorf("failed to "+ + "parse output key for commit %d: %w", commitID.Int64, + err) + } + + // Fetch and deserialize the transaction. + var commitTx wire.MsgTx + chainTxRow, err := db.FetchChainTxByID(ctx, commit.ChainTxnID) + if err != nil { + return noneRootCommit, noneChainInfo, fmt.Errorf("failed to "+ + "fetch chain tx %d for commit %d: %w", + commit.ChainTxnID, commitID.Int64, err) + } + err = commitTx.Deserialize(bytes.NewReader(chainTxRow.RawTx)) + if err != nil { + return noneRootCommit, noneChainInfo, fmt.Errorf("failed to "+ + "deserialize commit tx for commit %d: %w", + commitID.Int64, err) + } + + // Construct the SMT root node from the stored hash and sum. If they are + // NULL (e.g., initial commit before ApplyStateTransition ran), use the + // empty root. + var rootNode *mssmt.BranchNode + if commit.SupplyRootHash == nil || !commit.SupplyRootSum.Valid { + log.Warnf("fetchCommitment: Supply root hash/sum is NULL for "+ + "commit %d, using empty root", commitID.Int64) + rootNode = mssmt.NewComputedBranch(mssmt.EmptyTreeRootHash, 0) + } else { + var rootHash mssmt.NodeHash + copy(rootHash[:], commit.SupplyRootHash) + rootSum := uint64(commit.SupplyRootSum.Int64) + rootNode = mssmt.NewComputedBranch(rootHash, rootSum) + } + + rootCommitment := supplycommit.RootCommitment{ + Txn: &commitTx, + TxOutIdx: uint32(commit.OutputIndex.Int32), + InternalKey: internalKey, + OutputKey: outputKey, + SupplyRoot: rootNode, + } + + // Now, attempt to construct the chain info if confirmed. + var chainInfoOpt lfn.Option[commitmentChainInfo] + + // If we have a valid block height, then that means that the block + // header and/or merkle proof may also be present. 
+ if commit.BlockHeight.Valid { + blockHeight := uint32(commit.BlockHeight.Int32) + + var blockHeader *wire.BlockHeader + if len(commit.BlockHeader) > 0 { + blockHeader = &wire.BlockHeader{} + err = blockHeader.Deserialize( + bytes.NewReader(commit.BlockHeader), + ) + if err != nil { + // Log error but don't fail the whole fetch + log.Errorf("fetchCommitment: failed to "+ + "deserialize block header "+ + "for commit %d: %v", commitID.Int64, + err) + blockHeader = nil + } + } + + var merkleProof *proof.TxMerkleProof + if len(commit.MerkleProof) > 0 { + merkleProof = &proof.TxMerkleProof{} + err = merkleProof.Decode(bytes.NewReader( + commit.MerkleProof, + )) + if err != nil { + log.Errorf("fetchCommitment: failed to "+ + "decode merkle proof for commit %d: "+ + "%v", commitID.Int64, err) + merkleProof = nil + } + } + + if blockHeader != nil && merkleProof != nil { + chainInfoOpt = lfn.Some(commitmentChainInfo{ + BlockHeader: blockHeader, + MerkleProof: merkleProof, + BlockHeight: blockHeight, + }) + } else { + log.Warnf("fetchCommitment: commit %d has block "+ + "height but missing header (%v) or proof (%v)", + commitID.Int64, blockHeader == nil, + merkleProof == nil) + } + } + + return lfn.Some(rootCommitment), chainInfoOpt, nil +} + +// FetchState attempts to fetch the state of the state machine for the +// target asset specifier. +func (s *SupplyCommitMachine) FetchState(ctx context.Context, + assetSpec asset.Specifier) (supplycommit.State, + lfn.Option[supplycommit.SupplyStateTransition], error) { + + groupKey := assetSpec.UnwrapGroupKeyToPtr() + if groupKey == nil { + return nil, lfn.None[supplycommit.SupplyStateTransition](), + ErrMissingGroupKey + } + groupKeyBytes := groupKey.SerializeCompressed() + + var ( + state supplycommit.State + stateTransition supplycommit.SupplyStateTransition + foundTransition bool + pendingUpdates []supplycommit.SupplyUpdateEvent + oldCommitmentOpt lfn.Option[supplycommit.RootCommitment] + newCommit supplycommit.RootCommitment + chainProofOpt lfn.Option[supplycommit.ChainProof] + ) + + readTx := ReadTxOption() + err := s.db.ExecTx(ctx, readTx, func(db SupplyCommitStore) error { + // First, we'll attempt to fetch the supply state machine for + // this group key. + stateMachine, err := db.QuerySupplyCommitStateMachine( + ctx, groupKeyBytes, + ) + if err != nil { + // If no state machine exists, return default state and + // no transition. + if errors.Is(err, sql.ErrNoRows) { + // Not an error, just no state persisted yet. + state = &supplycommit.DefaultState{} + return nil + } + return fmt.Errorf("failed to query state machine: "+ + "%w", err) + } + + // Map the DB state ID to the interface state type. + state, err = intToState(stateMachine.CurrentStateID) + if err != nil { + return fmt.Errorf("failed to map state ID: %w", err) + } + + // Next, we'll fetch the current pending state transition, if it + // exists for this group key. If not, then we can return early + // as we only have the default state. + dbTransition, err := db.QueryPendingSupplyCommitTransition( + ctx, groupKeyBytes, + ) + if err != nil { + // No pending transition, state transition remains + // empty. + if errors.Is(err, sql.ErrNoRows) { + return nil + } + return fmt.Errorf("failed to query pending "+ + "transition: %w", err) + } + foundTransition = true + + // Now that we know we have a state transition, we'll query for + // all the pending updates related to the state transition. 
+ eventRows, err := db.QuerySupplyUpdateEvents( + ctx, dbTransition.TransitionID, + ) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return fmt.Errorf("failed to query update events: "+ + "%w", err) + } + pendingUpdates = make( + []supplycommit.SupplyUpdateEvent, 0, len(eventRows), + ) + for _, eventRow := range eventRows { + event, err := deserializeSupplyUpdateEvent( + eventRow.UpdateTypeName, + bytes.NewReader(eventRow.EventData), + ) + if err != nil { + return fmt.Errorf("failed to deserialize "+ + "event: %w", err) + } + pendingUpdates = append(pendingUpdates, event) + } + + // Next, we'll fetch the old and new commitments. If this is the + // very first state transition, there won't be an old + // commitment. + oldCommitmentOpt, _, err = fetchCommitment( + ctx, db, dbTransition.OldCommitmentID, groupKeyBytes, + ) + if err != nil { + return fmt.Errorf("failed fetching old "+ + "commitment: %w", err) + } + newCommitmentOpt, newCommitChainInfoOpt, err := fetchCommitment( + ctx, db, dbTransition.NewCommitmentID, groupKeyBytes, + ) + if err != nil { + return fmt.Errorf("failed fetching new "+ + "commitment: %w", err) + } + + // Construct the ChainProof if the new commitment's chain info + // is present. + newCommitChainInfoOpt.WhenSome(func(info commitmentChainInfo) { + if info.BlockHeader != nil && info.MerkleProof != nil { + chainProofOpt = lfn.Some(supplycommit.ChainProof{ //nolint:lll + Header: *info.BlockHeader, + BlockHeight: info.BlockHeight, + MerkleProof: *info.MerkleProof, + }) + } + }) + + newCommit = newCommitmentOpt.UnwrapOr( + supplycommit.RootCommitment{}, + ) + + return nil + }) + if err != nil { + return nil, lfn.None[supplycommit.SupplyStateTransition](), err + } + + // If a transition was found, reconstruct it and wrap in Some. + if foundTransition { + stateTransition = supplycommit.SupplyStateTransition{ + OldCommitment: oldCommitmentOpt, + PendingUpdates: pendingUpdates, + NewCommitment: newCommit, + ChainProof: chainProofOpt, + } + return state, lfn.Some(stateTransition), nil + } + + // No transition was found (err was sql.ErrNoRows earlier). + return state, lfn.None[supplycommit.SupplyStateTransition](), nil +} + +// ApplyStateTransition applies a new state transition to the target state +// machine. +func (s *SupplyCommitMachine) ApplyStateTransition( + ctx context.Context, assetSpec asset.Specifier, + transition supplycommit.SupplyStateTransition) error { + + groupKey := assetSpec.UnwrapGroupKeyToPtr() + if groupKey == nil { + return ErrMissingGroupKey + } + groupKeyBytes := groupKey.SerializeCompressed() + + // Ensure we have the new commitment details. + newCommitment := transition.NewCommitment + if newCommitment.SupplyRoot == nil || newCommitment.Txn == nil { + return fmt.Errorf("ApplyStateTransition requires a complete " + + "NewCommitment") + } + + writeTx := WriteTxOption() + return s.db.ExecTx(ctx, writeTx, func(db SupplyCommitStore) error { + // First, we'll locate the state transition that we need to + // finalize based on the group key. + dbTransition, err := db.QueryPendingSupplyCommitTransition( + ctx, groupKeyBytes, + ) + if err != nil { + // If no pending transition exists, then we'll return an + // error. 
+			if errors.Is(err, sql.ErrNoRows) {
+				return fmt.Errorf("cannot apply transition, "+
+					"no pending transition found for %x",
+					groupKeyBytes)
+			}
+			return fmt.Errorf("failed to query pending "+
+				"transition: %w", err)
+		}
+		transitionID := dbTransition.TransitionID
+
+		// Next, we'll apply all the pending updates to the supply
+		// sub-trees, then use that to update the root tree. We hold on
+		// to the resulting root so we can persist it below.
+		finalRootSupplyRoot, err := applySupplyUpdatesInternal(
+			ctx, db, assetSpec, transition.PendingUpdates,
+		)
+		if err != nil {
+			return fmt.Errorf("failed to apply SMT updates: "+
+				"%w", err)
+		}
+
+		// Next, we'll update the supply commitment data. Before we do
+		// that, perform some basic sanity checks.
+		if !dbTransition.NewCommitmentID.Valid {
+			return fmt.Errorf("pending transition %d has no "+
+				"NewCommitmentID", transitionID)
+		}
+		newCommitmentID := dbTransition.NewCommitmentID.Int64
+		if !dbTransition.PendingCommitTxnID.Valid {
+			return fmt.Errorf("pending transition %d has no "+
+				"PendingCommitTxnID", transitionID)
+		}
+		chainTxnID := dbTransition.PendingCommitTxnID.Int64
+
+		// Update the commitment record with the calculated root hash
+		// and sum.
+		finalRootHash := finalRootSupplyRoot.NodeHash()
+		finalRootSum := finalRootSupplyRoot.NodeSum()
+		err = db.UpdateSupplyCommitmentRoot(
+			ctx, UpdateSupplyCommitmentRootParams{
+				CommitID:       newCommitmentID,
+				SupplyRootHash: finalRootHash[:],
+				SupplyRootSum:  sqlInt64(int64(finalRootSum)),
+			},
+		)
+		if err != nil {
+			return fmt.Errorf("failed to update commitment root "+
+				"hash/sum for commit %d: %w",
+				newCommitmentID, err)
+		}
+
+		// Next, we'll serialize the merkle proofs and block header, so
+		// we can update them on disk.
+		var (
+			proofBuf  bytes.Buffer
+			headerBuf bytes.Buffer
+		)
+		chainProof, err := transition.ChainProof.UnwrapOrErr(
+			fmt.Errorf("chain proof is required"),
+		)
+		if err != nil {
+			return fmt.Errorf("failed to unwrap "+
+				"chain proof: %w", err)
+		}
+		err = chainProof.MerkleProof.Encode(&proofBuf)
+		if err != nil {
+			return fmt.Errorf("failed to encode "+
+				"merkle proof: %w", err)
+		}
+		err = chainProof.Header.Serialize(&headerBuf)
+		if err != nil {
+			return fmt.Errorf("failed to "+
+				"serialize block header: %w",
+				err)
+		}
+		blockHeight := sqlInt32(chainProof.BlockHeight)
+
+		// With all the information serialized above, we'll now update
+		// the chain proof information for this current supply commit.
+		err = db.UpdateSupplyCommitmentChainDetails(
+			ctx, SupplyCommitChainDetails{
+				CommitID:    newCommitmentID,
+				MerkleProof: proofBuf.Bytes(),
+				OutputIndex: sqlInt32(newCommitment.TxOutIdx),
+				BlockHeader: headerBuf.Bytes(),
+				ChainTxnID:  chainTxnID,
+				BlockHeight: blockHeight,
+			},
+		)
+		if err != nil {
+			return fmt.Errorf("failed to update commitment chain "+
+				"details: %w", err)
+		}
+
+		// Also update the chain_txns record itself with the
+		// confirmation details (block hash, height, index).
+ var commitTxBytes bytes.Buffer + err = newCommitment.Txn.Serialize(&commitTxBytes) + if err != nil { + return fmt.Errorf("failed to serialize commit tx for "+ + "update: %w", err) + } + commitTxid := newCommitment.Txn.TxHash() + + _, err = db.UpsertChainTx(ctx, UpsertChainTxParams{ + Txid: commitTxid[:], + RawTx: commitTxBytes.Bytes(), + ChainFees: 0, + BlockHash: lnutils.ByteSlice( + chainProof.Header.BlockHash(), + ), + BlockHeight: blockHeight, + TxIndex: sqlInt32(chainProof.TxIndex), + }) + if err != nil { + return fmt.Errorf("failed to update chain_txns "+ + "confirmation: %w", err) + } + + // To finish up our book keeping, we'll now finalize the state + // transition on disk. + err = db.FinalizeSupplyCommitTransition(ctx, transitionID) + if err != nil { + return fmt.Errorf("failed to finalize transition: "+ + "%w", err) + } + + // Finally, we'll update the state on disk to be default again, + // while also pointing to the _new_ supply commitment on disk. + // We'll update both the state name and the latest commitment + // ID. + defaultStateName, err := stateToDBString( + &supplycommit.DefaultState{}, + ) + if err != nil { + return fmt.Errorf("error getting default state "+ + "name: %w", err) + } + + _, err = db.UpsertSupplyCommitStateMachine( + ctx, SupplyCommitMachineParams{ + GroupKey: groupKeyBytes, + StateName: sqlStr(defaultStateName), + LatestCommitmentID: dbTransition.NewCommitmentID, //nolint:lll + }) + if err != nil { + return fmt.Errorf("failed to update state machine to "+ + "default: %w", err) + } + + return nil + }) +} + +// Compile-time assertions to ensure SupplyCommitMachine implements the +// interfaces. +var _ supplycommit.CommitmentTracker = (*SupplyCommitMachine)(nil) +var _ supplycommit.StateMachineStore = (*SupplyCommitMachine)(nil) From 8b6b2e0c975b31b2beb61b6168c50c0ba159845c Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 2 May 2025 20:50:24 -0700 Subject: [PATCH 7/8] tapdb: add tests for SupplyCommitMachine --- tapdb/supply_commit_test.go | 1815 +++++++++++++++++++++++++++++++++++ 1 file changed, 1815 insertions(+) create mode 100644 tapdb/supply_commit_test.go diff --git a/tapdb/supply_commit_test.go b/tapdb/supply_commit_test.go new file mode 100644 index 000000000..85aeb7a40 --- /dev/null +++ b/tapdb/supply_commit_test.go @@ -0,0 +1,1815 @@ +package tapdb + +import ( + "bytes" + "context" + "database/sql" + "fmt" + "sort" + "testing" + "time" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/lightninglabs/taproot-assets/asset" + "github.com/lightninglabs/taproot-assets/internal/test" + "github.com/lightninglabs/taproot-assets/mssmt" + "github.com/lightninglabs/taproot-assets/proof" + "github.com/lightninglabs/taproot-assets/tapdb/sqlc" + "github.com/lightninglabs/taproot-assets/universe/supplycommit" + lfn "github.com/lightningnetwork/lnd/fn/v2" + "github.com/lightningnetwork/lnd/lnutils" + "github.com/stretchr/testify/require" +) + +// supplyCommitTestSetup holds the components initialized by +// setupSupplyCommitTest. +type supplyCommitTestSetup struct { + commitMachine *SupplyCommitMachine + commitTreeStore *SupplyTreeStore + db sqlc.Querier + baseGenesis asset.Genesis + groupPubKey *btcec.PublicKey +} + +// setupSupplyCommitTest initializes the core components needed for supply +// commitment tests. 
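+//
+// A test would typically use it along these lines (sketch only):
+//
+//	setup := setupSupplyCommitTest(t)
+//	spec := asset.NewSpecifierOptionalGroupPubKey(asset.ID{}, setup.groupPubKey)
+//	state, _, err := setup.commitMachine.FetchState(context.Background(), spec)
+//	require.NoError(t, err)
+//	require.IsType(t, &supplycommit.DefaultState{}, state)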
+func setupSupplyCommitTest(t *testing.T) *supplyCommitTestSetup { + t.Helper() + + db := NewTestDB(t) + sqlDB := db.BaseDB + + txCreatorCommit := func(tx *sql.Tx) SupplyCommitStore { + return db.WithTx(tx) + } + batchedDBCommit := NewTransactionExecutor[SupplyCommitStore]( + sqlDB, txCreatorCommit, + ) + commitMachine := NewSupplyCommitMachine(batchedDBCommit) + + // Create a group key that'll be used in the test context. + groupPrivKey, err := btcec.NewPrivateKey() + require.NoError(t, err) + groupPubKey := groupPrivKey.PubKey() + + baseGenesis := asset.RandGenesis(t, asset.Normal) + + ctx := context.Background() + + // Insert the base genesis needed for burn/ignore events. + txCreatorTree := func(tx *sql.Tx) BaseUniverseStore { + return db.WithTx(tx) + } + batchedDBTree := NewTransactionExecutor[BaseUniverseStore]( + db.BaseDB, txCreatorTree, + ) + genesisPointID, err := upsertGenesisPoint( + ctx, batchedDBTree, baseGenesis.FirstPrevOut, + ) + require.NoError(t, err) + _, err = upsertGenesis(ctx, batchedDBTree, genesisPointID, baseGenesis) + require.NoError(t, err) + + // Initialize the SupplyTreeStore using the same batched DB. + commitTreeStore := NewSupplyTreeStore(batchedDBTree) + + return &supplyCommitTestSetup{ + commitMachine: commitMachine, + commitTreeStore: commitTreeStore, + db: db, + baseGenesis: baseGenesis, + groupPubKey: groupPubKey, + } +} + +// addTestMintingBatch inserts a basic minting batch and related data using +// harness components. +func (h *supplyCommitTestHarness) addTestMintingBatch() ([]byte, int64, + *wire.MsgTx, []byte, []byte) { + + h.t.Helper() + + ctx := h.ctx + db := h.db + + batchKeyDesc, _ := test.RandKeyDesc(h.t) + batchKeyBytes := batchKeyDesc.PubKey.SerializeCompressed() + batchKeyID, err := db.UpsertInternalKey( + ctx, sqlc.UpsertInternalKeyParams{ + RawKey: batchKeyBytes, + KeyFamily: int32(batchKeyDesc.Family), + KeyIndex: int32(batchKeyDesc.Index), + }, + ) + require.NoError(h.t, err) + + genesisPoint := test.RandOp(h.t) + genesisPointID, err := upsertGenesisPoint(ctx, db, genesisPoint) + require.NoError(h.t, err) + + mintingTx := wire.NewMsgTx(2) + mintingTx.AddTxIn(&wire.TxIn{ + PreviousOutPoint: genesisPoint, + }) + mintingTx.AddTxOut(&wire.TxOut{ + Value: 1000, + PkScript: test.RandBytes(20), + }) + + mintTxBytes, err := encodeTx(mintingTx) + require.NoError(h.t, err) + mintTxID := mintingTx.TxHash() + mintTxDbID, err := db.UpsertChainTx(ctx, sqlc.UpsertChainTxParams{ + Txid: mintTxID[:], + RawTx: mintTxBytes, + }) + require.NoError(h.t, err) + + genesisPointBytes, err := encodeOutpoint(genesisPoint) + require.NoError(h.t, err) + err = db.AnchorGenesisPoint(ctx, sqlc.AnchorGenesisPointParams{ + PrevOut: genesisPointBytes, + AnchorTxID: sqlInt64(mintTxDbID), + }) + require.NoError(h.t, err) + + err = db.NewMintingBatch(ctx, sqlc.NewMintingBatchParams{ + BatchID: batchKeyID, + HeightHint: 100, + CreationTimeUnix: time.Now(), + }) + require.NoError(h.t, err) + + _, err = db.BindMintingBatchWithTx( + ctx, sqlc.BindMintingBatchWithTxParams{ + RawKey: batchKeyDesc.PubKey.SerializeCompressed(), + GenesisID: sqlInt64(genesisPointID), + }, + ) + require.NoError(h.t, err) + + return batchKeyBytes, mintTxDbID, mintingTx, mintTxID[:], mintTxBytes +} + +// stateTransitionOutput encapsulates the results of a simulated state +// transition performed by performSingleTransition. 
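+//
+// In the tests below it is typically produced and consumed as (sketch):
+//
+//	out := h.performSingleTransition(updates)
+//	h.assertTransitionApplied(out)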
+type stateTransitionOutput struct { + appliedUpdates []supplycommit.SupplyUpdateEvent + internalKey *btcec.PublicKey + outputKey *btcec.PublicKey + commitTx *wire.MsgTx + chainProof supplycommit.ChainProof + txOutIndex uint32 +} + +// supplyCommitTestHarness holds the necessary components for testing the +// SupplyCommitMachine's StateMachineStore implementation. +type supplyCommitTestHarness struct { + t *testing.T + ctx context.Context + commitMachine *SupplyCommitMachine + db sqlc.Querier + groupPubKey *btcec.PublicKey + groupKeyBytes []byte + assetSpec asset.Specifier + baseGenesis asset.Genesis + groupKey *asset.GroupKey + batchedTreeDB BatchedUniverseTree + commitTreeStore *SupplyTreeStore +} + +// newSupplyCommitTestHarness creates a new test harness instance. +func newSupplyCommitTestHarness(t *testing.T) *supplyCommitTestHarness { + t.Helper() + + setup := setupSupplyCommitTest(t) + ctx := context.Background() + + groupKey := &asset.GroupKey{GroupPubKey: *setup.groupPubKey} + + // Use the baseGenesis ID for the initial specifier if needed, or empty + // ID. Let's use an empty ID for a generic harness setup. + spec := asset.NewSpecifierOptionalGroupPubKey( + asset.ID{}, setup.groupPubKey, + ) + + return &supplyCommitTestHarness{ + t: t, + ctx: ctx, + commitMachine: setup.commitMachine, + db: setup.db, + groupPubKey: setup.groupPubKey, + groupKeyBytes: setup.groupPubKey.SerializeCompressed(), + assetSpec: spec, + baseGenesis: setup.baseGenesis, + groupKey: groupKey, + batchedTreeDB: setup.commitTreeStore.db, + commitTreeStore: setup.commitTreeStore, + } +} + +// addTestMintAnchorUniCommitment inserts a mint_anchor_uni_commitments record +// using harness data. +func (h *supplyCommitTestHarness) addTestMintAnchorUniCommitment( + batchKeyBytes []byte, spentBy sql.NullInt64) int64 { + + h.t.Helper() + + internalKey, _ := test.RandKeyDesc(h.t) + internalKeyID, err := h.db.UpsertInternalKey( + h.ctx, sqlc.UpsertInternalKeyParams{ + RawKey: internalKey.PubKey.SerializeCompressed(), + KeyFamily: int32(internalKey.KeyLocator.Family), + KeyIndex: int32(internalKey.KeyLocator.Index), + }, + ) + require.NoError(h.t, err) + + txOutputIndex := int32(test.RandInt[uint32]() % 100) + + anchorCommitID, err := h.db.UpsertMintAnchorUniCommitment( + h.ctx, sqlc.UpsertMintAnchorUniCommitmentParams{ + BatchKey: batchKeyBytes, + TxOutputIndex: txOutputIndex, + TaprootInternalKeyID: internalKeyID, + GroupKey: h.groupKeyBytes, + SpentBy: spentBy, + }, + ) + require.NoError(h.t, err) + + return anchorCommitID +} + +// currentState fetches the current state of the state machine via FetchState. +func (h *supplyCommitTestHarness) currentState() supplycommit.State { + h.t.Helper() + + state, _, err := h.commitMachine.FetchState(h.ctx, h.assetSpec) + require.NoError(h.t, err) + return state +} + +// currentTransition fetches the current pending transition via FetchState. +// Returns None if no transition is pending. +// +//nolint:lll +func (h *supplyCommitTestHarness) currentTransition() lfn.Option[supplycommit.SupplyStateTransition] { + h.t.Helper() + + _, transitionOpt, err := h.commitMachine.FetchState(h.ctx, h.assetSpec) + require.NoError(h.t, err) + return transitionOpt +} + +// assertPendingTransitionExists asserts that a pending (non-finalized) +// transition exists. 
+// +//nolint:lll +func (h *supplyCommitTestHarness) assertPendingTransitionExists() SupplyCommitTransition { + h.t.Helper() + + dbTransition, err := h.fetchPendingTransition() + require.NoError(h.t, err, "expected pending transition to exist") + require.False(h.t, dbTransition.Finalized) + return dbTransition +} + +// assertNoPendingTransition asserts that no pending (non-finalized) transition +// exists. +func (h *supplyCommitTestHarness) assertNoPendingTransition() { + h.t.Helper() + + _, err := h.fetchPendingTransition() + require.ErrorIs(h.t, err, sql.ErrNoRows, + "expected no pending transition") +} + +// assertTransitionInitialState asserts basic fields for a newly created +// transition. +func (h *supplyCommitTestHarness) assertTransitionInitialState( + dbTransition SupplyCommitTransition) { + + h.t.Helper() + + require.False(h.t, dbTransition.Finalized) + require.False(h.t, dbTransition.NewCommitmentID.Valid) + require.False(h.t, dbTransition.PendingCommitTxnID.Valid) + + // Check that creation time is set and is recent. We allow a small delta + // to account for test execution time and potential slight clock + // differences if the DB were remote (though it's embedded for tests). + require.NotZero(h.t, dbTransition.CreationTime) + creationTime := time.Unix(dbTransition.CreationTime, 0) + require.WithinDuration( + h.t, time.Now(), creationTime, 5*time.Second, + ) +} + +// randMintEvent generates a random mint event using the harness's group key. +func (h *supplyCommitTestHarness) randMintEvent() *supplycommit.NewMintEvent { + gen := randMintEventGen(h.groupPubKey) + mintEvent := gen.Example().(*supplycommit.NewMintEvent) + + // For an asset, the witness isn't encoded when we encode the group key. + mintEvent.IssuanceProof.GenesisWithGroup.GroupKey.Witness = nil + mintEvent.IssuanceProof.Asset.GroupKey.Witness = nil + + mintEvent.IssuanceProof.Asset.ScriptKey.TweakedScriptKey = nil + + return mintEvent +} + +// randBurnEvent generates a random burn event using the harness's base genesis +// and group key. +func (h *supplyCommitTestHarness) randBurnEvent() *supplycommit.NewBurnEvent { + gen := randBurnEventGen(h.baseGenesis, h.groupKey, h.batchedTreeDB) + return gen.Example().(*supplycommit.NewBurnEvent) +} + +// randIgnoreEvent generates a random ignore event using the harness's base +// genesis ID. +// +//nolint:lll +func (h *supplyCommitTestHarness) randIgnoreEvent() *supplycommit.NewIgnoreEvent { + gen := randIgnoreEventGen(h.baseGenesis.ID(), h.batchedTreeDB) + return gen.Example().(*supplycommit.NewIgnoreEvent) +} + +// fetchPendingTransition fetches the current pending transition directly via +// SQL. +func (h *supplyCommitTestHarness) fetchPendingTransition() ( + SupplyCommitTransition, error) { + + var transition SupplyCommitTransition + readTx := ReadTxOption() + err := h.commitMachine.db.ExecTx( + h.ctx, readTx, func(db SupplyCommitStore) error { + var txErr error + transition, txErr = db.QueryPendingSupplyCommitTransition( //nolint:lll + h.ctx, h.groupKeyBytes, + ) + return txErr + }, + ) + return transition, err +} + +// assertCurrentStateIs fetches the current state and asserts it matches the +// expected state type. 
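+//
+// For example (sketch of the expected progression):
+//
+//	h.assertCurrentStateIs(&supplycommit.DefaultState{})
+//	err := h.commitMachine.InsertPendingUpdate(
+//		h.ctx, h.assetSpec, h.randMintEvent(),
+//	)
+//	require.NoError(h.t, err)
+//	h.assertCurrentStateIs(&supplycommit.UpdatesPendingState{})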
+func (h *supplyCommitTestHarness) assertCurrentStateIs( + expectedState supplycommit.State) { + + h.t.Helper() + state := h.currentState() + require.IsType(h.t, expectedState, state) +} + +// assertPendingUpdates fetches the current transition, asserts the number of +// pending updates, and compares the serialized versions of the updates with the +// expected events after sorting both slices by their universe leaf key. +func (h *supplyCommitTestHarness) assertPendingUpdates( + expectedEvents []supplycommit.SupplyUpdateEvent) { + + h.t.Helper() + transitionOpt := h.currentTransition() + require.True( + h.t, transitionOpt.IsSome(), + "expected pending transition for update check", + ) + transition := transitionOpt.UnwrapOrFail(h.t) + actualEvents := transition.PendingUpdates + require.Len(h.t, actualEvents, len(expectedEvents)) + + // Create copies to avoid modifying the original slices. + expectedCopy := make( + []supplycommit.SupplyUpdateEvent, len(expectedEvents), + ) + copy(expectedCopy, expectedEvents) + actualCopy := make([]supplycommit.SupplyUpdateEvent, len(actualEvents)) + copy(actualCopy, actualEvents) + + // Define a sorting function based on the UniverseLeafKey. + sorter := func(events []supplycommit.SupplyUpdateEvent) { + sort.SliceStable(events, func(i, j int) bool { + keyI := events[i].UniverseLeafKey().UniverseKey() + keyJ := events[j].UniverseLeafKey().UniverseKey() + // Compare byte slices lexicographically. + return bytes.Compare(keyI[:], keyJ[:]) < 0 + }) + } + + // Sort both copies. + sorter(expectedCopy) + sorter(actualCopy) + + // Compare the serialized bytes of each event pair. There're a lot of + // fields that aren't actually encoded, so a simple equals check won't + // work. + for i := range expectedCopy { + expectedEvent := expectedCopy[i] + actualEvent := actualCopy[i] + + var expectedBytes bytes.Buffer + err := serializeSupplyUpdateEvent(&expectedBytes, expectedEvent) + require.NoError( + h.t, err, "failed to serialize expected event %d", i, + ) + + var actualBytes bytes.Buffer + err = serializeSupplyUpdateEvent(&actualBytes, actualEvent) + require.NoError( + h.t, err, "failed to serialize actual event %d", i, + ) + + require.Equal(h.t, + expectedBytes.String(), actualBytes.String(), + "mismatch for serialized event %d "+ + "(expected %T, actual %T)", + i, expectedEvent, actualEvent, + ) + } +} + +// fetchStateMachine fetches the state machine details directly via SQL. +func (h *supplyCommitTestHarness) fetchStateMachine() ( + QuerySupplyStateMachineResp, error) { + + var stateMachine QuerySupplyStateMachineResp + readTx := ReadTxOption() + err := h.commitMachine.db.ExecTx(h.ctx, readTx, + func(db SupplyCommitStore) error { + var txErr error + stateMachine, txErr = db.QuerySupplyCommitStateMachine( + h.ctx, h.groupKeyBytes, + ) + return txErr + }, + ) + return stateMachine, err +} + +// addTestSupplyCommitment inserts a supply_commitments record using harness +// data. 
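+//
+// A typical seeding sketch (illustrative) that records a confirmed commitment
+// and points the state machine at it:
+//
+//	_, mintTxDbID, _, mintTxid, mintTxBytes := h.addTestMintingBatch()
+//	commitID := h.addTestSupplyCommitment(mintTxDbID, mintTxid, mintTxBytes, true)
+//	h.addTestStateMachine(sqlInt64(commitID))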
+func (h *supplyCommitTestHarness) addTestSupplyCommitment(chainTxID int64, + txidBytes, rawTxBytes []byte, isConfirmed bool) int64 { + + h.t.Helper() + + ctx := h.ctx + db := h.db + groupKeyBytes := h.groupKeyBytes + groupPubKey := h.groupPubKey + baseGenesis := h.baseGenesis + groupKey := h.groupKey + batchedTreeDB := h.batchedTreeDB + commitDB := h.commitMachine + assetSpec := h.assetSpec + + internalKeyDesc, _ := test.RandKeyDesc(h.t) + internalKeyID, err := db.UpsertInternalKey( + ctx, sqlc.UpsertInternalKeyParams{ + RawKey: internalKeyDesc.PubKey.SerializeCompressed(), + KeyFamily: int32(internalKeyDesc.Family), + KeyIndex: int32(internalKeyDesc.Index), + }, + ) + require.NoError(h.t, err) + + outputKey := test.RandPubKey(h.t) + + // Instantiate generators and create example events. + mintGen := randMintEventGen(groupPubKey) + burnGen := randBurnEventGen(baseGenesis, groupKey, batchedTreeDB) + ignoreGen := randIgnoreEventGen(baseGenesis.ID(), batchedTreeDB) + + exampleMint := mintGen.Example() + exampleBurn := burnGen.Example() + exampleIgnore := ignoreGen.Example() + + // Apply the dummy updates to create the SMT roots implicitly. + var ( + finalRoot mssmt.Node + ) + err = commitDB.db.ExecTx( + ctx, WriteTxOption(), func(dbtx SupplyCommitStore) error { + finalRoot, err = applySupplyUpdatesInternal( + ctx, dbtx, assetSpec, + []supplycommit.SupplyUpdateEvent{ + exampleMint, exampleBurn, exampleIgnore, + }, + ) + return err + }, + ) + require.NoError(h.t, err) + + // Update the chain TX confirmation status. + var blockHash []byte + var blockHeight sql.NullInt32 + if isConfirmed { + blockHash = test.RandBytes(32) + blockHeight = sqlInt32(123) + } + + // Upsert the chain tx with confirmation details (or lack thereof). + _, err = db.UpsertChainTx(ctx, sqlc.UpsertChainTxParams{ + Txid: txidBytes, + RawTx: rawTxBytes, + ChainFees: 0, + BlockHash: blockHash, + BlockHeight: blockHeight, + TxIndex: sqlInt32(1), + }) + require.NoError(h.t, err) + + commitID, err := db.InsertSupplyCommitment( + ctx, sqlc.InsertSupplyCommitmentParams{ + GroupKey: groupKeyBytes, + ChainTxnID: chainTxID, + InternalKeyID: internalKeyID, + OutputKey: outputKey.SerializeCompressed(), + BlockHeight: blockHeight, + BlockHeader: test.RandBytes(80), + MerkleProof: test.RandBytes(64), + OutputIndex: sqlInt32(0), + SupplyRootHash: lnutils.ByteSlice(finalRoot.NodeHash()), + SupplyRootSum: sqlInt64(finalRoot.NodeSum()), + }, + ) + require.NoError(h.t, err) + return commitID +} + +// addTestStateMachine inserts a supply_commit_state_machines record +// using harness data. +func (h *supplyCommitTestHarness) addTestStateMachine( + latestCommitID sql.NullInt64) { + + h.t.Helper() + + _, err := h.db.UpsertSupplyCommitStateMachine( + h.ctx, sqlc.UpsertSupplyCommitStateMachineParams{ + GroupKey: h.groupKeyBytes, + StateName: sqlStr("DefaultState"), + LatestCommitmentID: latestCommitID, + }, + ) + require.NoError(h.t, err) +} + +// fetchCommitmentByID fetches a commitment by ID directly via SQL. +func (h *supplyCommitTestHarness) fetchCommitmentByID( + commitID int64) (sqlc.SupplyCommitment, error) { + + var commitment sqlc.SupplyCommitment + readTx := ReadTxOption() + err := h.commitMachine.db.ExecTx(h.ctx, readTx, + func(db SupplyCommitStore) error { + var txErr error + commitment, txErr = db.QuerySupplyCommitment( + h.ctx, commitID, + ) + return txErr + }, + ) + return commitment, err +} + +// fetchInternalKeyByID fetches an internal key by ID directly via SQL. 
+// +//nolint:lll +func (h *supplyCommitTestHarness) fetchInternalKeyByID(keyID int64) FetchInternalKeyByIDRow { + h.t.Helper() + var keyRow FetchInternalKeyByIDRow + readTx := ReadTxOption() + err := h.commitMachine.db.ExecTx(h.ctx, readTx, + func(db SupplyCommitStore) error { + var txErr error + keyRow, txErr = db.FetchInternalKeyByID(h.ctx, keyID) + return txErr + }, + ) + require.NoError(h.t, err) + return keyRow +} + +// fetchChainTxByID fetches a chain tx by ID directly via SQL. +func (h *supplyCommitTestHarness) fetchChainTxByID(txID int64, +) (FetchChainTxByIDRow, error) { + + var chainTx FetchChainTxByIDRow + readTx := ReadTxOption() + err := h.commitMachine.db.ExecTx(h.ctx, readTx, + func(db SupplyCommitStore) error { + var txErr error + chainTx, txErr = db.FetchChainTxByID(h.ctx, txID) + return txErr + }, + ) + return chainTx, err +} + +// linkTxToPendingTransition manually updates the pending_commit_txn_id for the +// current pending transition. +func (h *supplyCommitTestHarness) linkTxToPendingTransition(chainTxID int64) { + h.t.Helper() + dbTransition := h.assertPendingTransitionExists() + + writeTx := WriteTxOption() + err := h.commitMachine.db.ExecTx( + h.ctx, writeTx, func(db SupplyCommitStore) error { + // Link the TX ID, NewCommitmentID remains NULL for now. + //nolint:lll + return db.UpdateSupplyCommitTransitionCommitment( + h.ctx, UpdateSupplyCommitTransitionCommitmentParams{ + PendingCommitTxnID: sqlInt64(chainTxID), + NewCommitmentID: sql.NullInt64{}, + TransitionID: dbTransition.TransitionID, + }, + ) + }, + ) + require.NoError(h.t, err) +} + +// confirmChainTx confirms a chain tx directly via SQL. +func (h *supplyCommitTestHarness) confirmChainTx(txID int64, txidBytes, + rawTxBytes []byte) { + + blockHash := test.RandBytes(32) + blockHeight := sqlInt32(test.RandInt[int32]()) + txIndex := sqlInt32(test.RandInt[int32]()) + _, err := h.db.UpsertChainTx(h.ctx, sqlc.UpsertChainTxParams{ + Txid: txidBytes, + RawTx: rawTxBytes, + ChainFees: 0, + BlockHash: blockHash, + BlockHeight: blockHeight, + TxIndex: txIndex, + }) + require.NoError(h.t, err) +} + +// performSingleTransition simulates a full state transition cycle: inserting +// updates, inserting the signed commit TX (which sets DB state), and finally +// calling ApplyStateTransition with simulated confirmation data. It returns the +// list of updates applied, the generated keys, the commit TX, and the simulated +// chain proof details for assertion purposes. +func (h *supplyCommitTestHarness) performSingleTransition( + updates []supplycommit.SupplyUpdateEvent) stateTransitionOutput { + + h.t.Helper() + + // Assert initial state is DefaultState (assuming test starts clean). + h.assertCurrentStateIs(&supplycommit.DefaultState{}) + + // First, we'll insert the set of pending updates into the DB. This'll + // create a new state transition record with the updates. + for _, event := range updates { + err := h.commitMachine.InsertPendingUpdate( + h.ctx, h.assetSpec, event, + ) + require.NoError(h.t, err) + } + + // Assert state transitioned to UpdatesPendingState. + h.assertCurrentStateIs(&supplycommit.UpdatesPendingState{}) + + // Next, we'll generate a new "fake" commitment transaction along with + // sample internal and output keys. + commitTx := randTx(h.t, 1) + internalKey := test.RandPubKey(h.t) + outputKey := test.RandPubKey(h.t) + + // We'll now simulate the next phase of the state transition where we + // make the new commitment, capture that in a new commit tx, then sign + // and commit that. 
+	commitDetails := supplycommit.SupplyCommitTxn{
+		Txn:         commitTx,
+		InternalKey: internalKey,
+		OutputKey:   outputKey,
+		OutputIndex: 1,
+	}
+	err := h.commitMachine.InsertSignedCommitTx(
+		h.ctx, h.assetSpec, commitDetails,
+	)
+	require.NoError(h.t, err)
+
+	// Assert state transitioned to CommitBroadcastState.
+	h.assertCurrentStateIs(&supplycommit.CommitBroadcastState{})
+
+	// Next, we'll make a fake confirmation proof; this'll be used when we
+	// go to apply the state transition, which is only meant to be done once
+	// it has confirmed on chain.
+	blockHeader := &wire.BlockHeader{
+		Version:    int32(test.RandInt[uint32]()),
+		PrevBlock:  test.RandHash(),
+		MerkleRoot: test.RandHash(),
+		Timestamp:  time.Unix(test.RandInt[int64](), 0),
+		Bits:       test.RandInt[uint32](),
+		Nonce:      test.RandInt[uint32](),
+	}
+	merkleProof := proof.TxMerkleProof{
+		Bits:  []bool{test.RandBool()},
+		Nodes: []chainhash.Hash{test.RandHash()},
+	}
+	blockHeight := uint32(test.RandInt[int32]())
+	chainProof := supplycommit.ChainProof{
+		Header:      *blockHeader,
+		BlockHeight: blockHeight,
+		MerkleProof: merkleProof,
+	}
+
+	// With the signed commitment inserted above, we'll now fetch the state
+	// on disk, as we'll use this to construct the new commitment based on
+	// the set of updates.
+	_, currentTransitionOpt, err := h.commitMachine.FetchState(
+		h.ctx, h.assetSpec,
+	)
+	require.NoError(h.t, err, "failed fetching state before apply")
+	require.True(
+		h.t, currentTransitionOpt.IsSome(),
+		"expected transition before apply",
+	)
+	currentTransition := currentTransitionOpt.UnwrapOrFail(h.t)
+
+	// Given the on-disk information of the current state transition
+	// (contains the old commitment), we'll now use that along with the
+	// updates to compute what we expect the new supply root to be.
+	expectedNewRoot := h.calculateExpectedRoot(
+		currentTransition.OldCommitment, updates,
+	)
+
+	// With all the above gathered, we'll now make the state transition
+	// object as expected, then apply the state transition.
+	applyTransition := supplycommit.SupplyStateTransition{
+		OldCommitment:  currentTransition.OldCommitment,
+		PendingUpdates: updates,
+		NewCommitment: supplycommit.RootCommitment{
+			Txn:         commitTx,
+			TxOutIdx:    test.RandInt[uint32](),
+			InternalKey: internalKey,
+			OutputKey:   outputKey,
+			SupplyRoot:  expectedNewRoot,
+		},
+		ChainProof: lfn.Some(chainProof),
+	}
+	err = h.commitMachine.ApplyStateTransition(
+		h.ctx, h.assetSpec, applyTransition,
+	)
+	require.NoError(h.t, err)
+
+	return stateTransitionOutput{
+		appliedUpdates: updates,
+		internalKey:    internalKey,
+		outputKey:      outputKey,
+		commitTx:       commitTx,
+		chainProof:     chainProof,
+		txOutIndex:     applyTransition.NewCommitment.TxOutIdx,
+	}
+}
+
+// applyTreeUpdates takes a map of in-memory supply sub-trees and applies a list
+// of pending updates to them. It returns the map containing the updated trees.
+// This function operates purely in-memory and is used for calculating expected
+// SMT roots during testing.
+func applyTreeUpdates(supplyTrees supplycommit.SupplyTrees,
+	pendingUpdates []supplycommit.SupplyUpdateEvent,
+) (supplycommit.SupplyTrees, error) {
+
+	ctx := context.Background()
+
+	// For each tree update, we'll select the proper tree, then apply the
+	// update to said tree.
+	for _, treeUpdate := range pendingUpdates {
+		// Obtain the universe leaf key and node directly from the event
+		// using the interface methods.
+		leafKey := treeUpdate.UniverseLeafKey()
+		leafValue, err := treeUpdate.UniverseLeafNode()
+		if err != nil {
+			return nil, fmt.Errorf("unable to create leaf node "+
+				"for update event %T: %w", treeUpdate, err)
+		}
+
+		targetTree := supplyTrees.FetchOrCreate(
+			treeUpdate.SupplySubTreeType(),
+		)
+
+		_, err = targetTree.Insert(
+			ctx, leafKey.UniverseKey(), leafValue,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("unable to insert leaf into "+
+				"target tree %v: %w",
+				treeUpdate.SupplySubTreeType(), err)
+		}
+
+		supplyTrees[treeUpdate.SupplySubTreeType()] = targetTree
+	}
+
+	return supplyTrees, nil
+}
+
+// calculateExpectedRoot calculates the expected SMT root after applying
+// updates.
+func (h *supplyCommitTestHarness) calculateExpectedRoot(
+	oldCommitmentOpt lfn.Option[supplycommit.RootCommitment],
+	updates []supplycommit.SupplyUpdateEvent) *mssmt.BranchNode {
+
+	h.t.Helper()
+
+	// If we have a root commitment, then we'll fetch the root tree from DB
+	// (i.e., this isn't the first state transition). Otherwise, we'll just
+	// start with a new blank tree.
+	oldRootTreeRes := lfn.MapOption(
+		//nolint:lll
+		func(oldCommitment supplycommit.RootCommitment) lfn.Result[mssmt.Tree] {
+			treeRes := h.commitTreeStore.FetchRootSupplyTree(
+				h.ctx, h.assetSpec,
+			)
+			fetchedTree, err := treeRes.Unpack()
+			if err != nil {
+				return lfn.Err[mssmt.Tree](fmt.Errorf("failed "+
+					"fetching root tree: %w", err))
+			}
+
+			return lfn.Ok(fetchedTree)
+		},
+	)(oldCommitmentOpt).UnwrapOr(
+		lfn.Ok[mssmt.Tree](mssmt.NewCompactedTree(
+			mssmt.NewDefaultStore()),
+		),
+	)
+
+	oldRootTree, err := oldRootTreeRes.Unpack()
+	require.NoError(h.t, err)
+
+	// We'll now copy over the old root tree to a temporary tree, so we can
+	// insert the updates directly.
+	tempRootTree := mssmt.NewCompactedTree(mssmt.NewDefaultStore())
+	err = oldRootTree.Copy(h.ctx, tempRootTree)
+	require.NoError(h.t, err)
+
+	// Next, we'll read out all the existing sub-trees, to store in a map as
+	// we'll insert into them below.
+	tempSubTrees := make(supplycommit.SupplyTrees)
+	for _, treeType := range []supplycommit.SupplySubTree{
+		supplycommit.IgnoreTreeType,
+		supplycommit.MintTreeType, supplycommit.BurnTreeType,
+	} {
+		subTree, err := h.commitTreeStore.FetchSubTree(
+			h.ctx, h.assetSpec, treeType,
+		).Unpack()
+		require.NoError(h.t, err)
+
+		if subTree == nil {
+			continue
+		}
+
+		// If we have a sub-tree created, then we'll copy it into the
+		// in-memory tree, so we can insert the updates directly. Any
+		// missing sub-trees are created on demand by applyTreeUpdates.
+		tempSubTrees[treeType] = mssmt.NewCompactedTree(
+			mssmt.NewDefaultStore(),
+		)
+		err = subTree.Copy(h.ctx, tempSubTrees[treeType])
+		require.NoError(h.t, err)
+	}
+
+	// Apply the set of updates to each of the sub-trees we have in memory.
+	updatedSubTrees, err := applyTreeUpdates(tempSubTrees, updates)
+	require.NoError(h.t, err)
+
+	// With the sub-trees updated, we can now update the root tree by
+	// inserting the new sub-tree roots.
+	for treeType, subTree := range updatedSubTrees {
+		subRoot, err := subTree.Root(h.ctx)
+		require.NoError(h.t, err)
+
+		leafNode := mssmt.NewLeafNode(lnutils.ByteSlice(
+			subRoot.NodeHash()), subRoot.NodeSum(),
+		)
+		leafKey := treeType.UniverseKey()
+
+		_, err = tempRootTree.Insert(h.ctx, leafKey, leafNode)
+		require.NoError(h.t, err)
+	}
+
+	// Obtain the final root tree after our insertions.
+	finalRoot, err := tempRootTree.Root(h.ctx)
+	require.NoError(h.t, err)
+
+	return finalRoot
+}
+
+// assertTransitionApplied verifies the state of the database and SMTs after
+// ApplyStateTransition has successfully completed. We ensure that the state
+// machine was updated properly, the new trees are in place, and the transition
+// is properly finalized.
+func (h *supplyCommitTestHarness) assertTransitionApplied(
+	output stateTransitionOutput) {
+
+	h.t.Helper()
+
+	commitTxid := output.commitTx.TxHash()
+	internalKey := output.internalKey
+	appliedUpdates := output.appliedUpdates
+	outputKey := output.outputKey
+	chainProof := output.chainProof
+
+	// Verify via FetchState that the machine is back in DefaultState and
+	// there's no pending transition object returned.
+	fetchedState, fetchedTransitionOpt, err := h.commitMachine.FetchState(
+		h.ctx, h.assetSpec,
+	)
+	require.NoError(h.t, err, "FetchState failed after apply")
+	require.IsType(
+		h.t, &supplycommit.DefaultState{}, fetchedState,
+		"state should be DefaultState after apply",
+	)
+	require.False(
+		h.t, fetchedTransitionOpt.IsSome(),
+		"no pending transition should be returned after apply",
+	)
+
+	// Next, we'll verify that the internal state machine matches the same
+	// state, and we'll then assert that the commitment pointer was updated
+	// properly on disk.
+	stateMachine, err := h.fetchStateMachine()
+	require.NoError(h.t, err)
+	require.Equal(
+		h.t, int32(0), stateMachine.CurrentStateID,
+		"state should be DefaultState",
+	)
+	require.True(
+		h.t, stateMachine.LatestCommitmentID.Valid,
+		"LatestCommitmentID should be set",
+	)
+
+	latestCommitmentID := stateMachine.LatestCommitmentID.Int64
+
+	// At this point, there should be no pending transition on disk (it
+	// should have been finalized).
+	h.assertNoPendingTransition()
+
+	// We should be able to fetch the latest commitment on disk now.
+	dbCommitment, err := h.fetchCommitmentByID(latestCommitmentID)
+	require.NoError(h.t, err)
+
+	// The transaction we inserted on disk as the latest commitment tx
+	// should also be found now.
+	chainTxRecord, err := h.db.FetchChainTx(h.ctx, commitTxid[:])
+	require.NoError(h.t, err)
+	require.Equal(
+		h.t, chainTxRecord.TxnID, dbCommitment.ChainTxnID,
+		"commitment linked to wrong ChainTxnID",
+	)
+
+	// The keys should also be inserted, and the db commitment should match
+	// what we inserted.
+	require.Equal(
+		h.t, internalKey.SerializeCompressed(),
+		h.fetchInternalKeyByID(dbCommitment.InternalKeyID).RawKey,
+		"internalKey mismatch",
+	)
+	require.Equal(
+		h.t, outputKey.SerializeCompressed(), dbCommitment.OutputKey,
+		"outputKey mismatch",
+	)
+
+	// Check stored root hash/sum are not empty.
+	require.NotNil(
+		h.t, dbCommitment.SupplyRootHash,
+		"SupplyRootHash should be set",
+	)
+	require.True(
+		h.t, dbCommitment.SupplyRootSum.Valid,
+		"SupplyRootSum should be set",
+	)
+
+	// All the chain details should also be populated as expected.
+	require.True(
+		h.t, dbCommitment.BlockHeight.Valid,
+		"blockHeight should be set",
+	)
+	require.Equal(
+		h.t, int32(chainProof.BlockHeight),
+		dbCommitment.BlockHeight.Int32, "blockHeight mismatch",
+	)
+	require.NotEmpty(
+		h.t, dbCommitment.BlockHeader, "blockHeader should be set",
+	)
+	require.NotEmpty(
+		h.t, dbCommitment.MerkleProof, "merkleProof should be set",
+	)
+	require.True(
+		h.t, dbCommitment.OutputIndex.Valid,
+		"outputIndex should be set",
+	)
+	require.Equal(
+		h.t, int32(output.txOutIndex), dbCommitment.OutputIndex.Int32,
+	)
+
+	// As a final step, we'll verify that the root supply tree for the asset
+	// spec matches what we re-created in memory.
+	rootSupplyTreeRes := h.commitTreeStore.FetchRootSupplyTree(
+		h.ctx, h.assetSpec,
+	)
+	rootSupplyTree, err := rootSupplyTreeRes.Unpack()
+	require.NoError(h.t, err)
+	finalRootSupplyNode, err := rootSupplyTree.Root(h.ctx)
+	require.NoError(h.t, err)
+
+	// Compare DB stored root with calculated root from fetched tree.
+	require.Equal(
+		h.t, lnutils.ByteSlice(finalRootSupplyNode.NodeHash()),
+		dbCommitment.SupplyRootHash,
+	)
+	require.Equal(
+		h.t, int64(finalRootSupplyNode.NodeSum()),
+		dbCommitment.SupplyRootSum.Int64,
+	)
+
+	// We'll now run through all the updates that should have been applied,
+	// and verify a merkle tree inclusion proof for each of them.
+	subTreeRoots := make(map[supplycommit.SupplySubTree]mssmt.Node)
+	for _, event := range appliedUpdates {
+		treeType := event.SupplySubTreeType()
+		leafKey := event.UniverseLeafKey()
+		leafNode, err := event.UniverseLeafNode()
+		require.NoError(h.t, err)
+
+		// We'll now fetch the sub-tree on disk, to make our assertions
+		// below.
+		subTreeRes := h.commitTreeStore.FetchSubTree(
+			h.ctx, h.assetSpec, treeType,
+		)
+		subTree, err := subTreeRes.Unpack()
+		require.NoError(h.t, err)
+
+		subTreeRoot, err := subTree.Root(h.ctx)
+		require.NoError(h.t, err)
+
+		// We'll store this root for later, to make sure that we can
+		// verify an inclusion proof from the in-memory root tree we
+		// created.
+		subTreeRoots[treeType] = subTreeRoot
+
+		// We'll now generate a merkle proof from the sub-tree on disk
+		// for this update. We should be able to verify it against the
+		// sub-tree root.
+		subProof, err := subTree.MerkleProof(
+			h.ctx, leafKey.UniverseKey(),
+		)
+		require.NoError(h.t, err)
+		isValidSubProof := mssmt.VerifyMerkleProof(
+			leafKey.UniverseKey(), leafNode, subProof, subTreeRoot,
+		)
+		require.True(
+			h.t, isValidSubProof,
+			"invalid sub-tree proof for %v key %x", treeType,
+			leafKey.UniverseKey(),
+		)
+	}
+
+	// As a final set of assertions, we'll verify that we can generate and
+	// then verify a merkle proof for each of the sub-trees based on the
+	// root supply tree.
+	for treeType, subTreeRoot := range subTreeRoots {
+		rootTreeLeafKey := treeType.UniverseKey()
+		rootTreeLeafNode := mssmt.NewLeafNode(
+			lnutils.ByteSlice(subTreeRoot.NodeHash()),
+			subTreeRoot.NodeSum(),
+		)
+
+		rootProof, err := rootSupplyTree.MerkleProof(
+			h.ctx, rootTreeLeafKey,
+		)
+		require.NoError(h.t, err)
+		isValidRootProof := mssmt.VerifyMerkleProof(
+			rootTreeLeafKey, rootTreeLeafNode, rootProof,
+			finalRootSupplyNode,
+		)
+		require.True(
+			h.t, isValidRootProof,
+			"invalid root tree proof for sub-tree %v", treeType,
+		)
+	}
+
+	// Finally, use the public SupplyCommit method to fetch the latest
+	// confirmed commitment and verify its fields match our expectations.
+	fetchedCommitRes := h.commitMachine.SupplyCommit(h.ctx, h.assetSpec)
+	fetchedCommitOpt, err := fetchedCommitRes.Unpack()
+	require.NoError(h.t, err, "SupplyCommit failed")
+	require.True(
+		h.t, fetchedCommitOpt.IsSome(), "SupplyCommit should return a "+
+			"commitment",
+	)
+
+	fetchedCommit := fetchedCommitOpt.UnwrapOrFail(h.t)
+
+	// Make sure this matches what was recorded in the state transition
+	// output.
+	require.Equal(
+		h.t, output.commitTx.TxHash(), fetchedCommit.Txn.TxHash(),
+		"SupplyCommit returned wrong Txn hash",
+	)
+	require.Equal(
+		h.t, output.internalKey.SerializeCompressed(),
+		fetchedCommit.InternalKey.SerializeCompressed(),
+		"SupplyCommit returned wrong InternalKey",
+	)
+	require.Equal(
+		h.t, output.outputKey.SerializeCompressed(),
+		fetchedCommit.OutputKey.SerializeCompressed(),
+		"SupplyCommit returned wrong OutputKey",
+	)
+	require.Equal(
+		h.t, uint32(dbCommitment.OutputIndex.Int32),
+		fetchedCommit.TxOutIdx,
+		"SupplyCommit returned wrong TxOutIdx",
+	)
+	require.Equal(
+		h.t, dbCommitment.SupplyRootHash,
+		lnutils.ByteSlice(fetchedCommit.SupplyRoot.NodeHash()),
+		"SupplyCommit returned wrong SupplyRoot hash",
+	)
+	require.Equal(
+		h.t, dbCommitment.SupplyRootSum.Int64,
+		int64(fetchedCommit.SupplyRoot.NodeSum()),
+		"SupplyCommit returned wrong SupplyRoot sum",
+	)
+}
+
+// TestSupplyCommitInsertPendingUpdate tests the insertion of pending updates.
+func TestSupplyCommitInsertPendingUpdate(t *testing.T) {
+	t.Parallel()
+
+	h := newSupplyCommitTestHarness(t)
+
+	var insertedEvents []supplycommit.SupplyUpdateEvent
+
+	// First, we'll insert a new update for a minting event.
+	event1 := h.randMintEvent()
+	insertedEvents = append(insertedEvents, event1)
+	err := h.commitMachine.InsertPendingUpdate(
+		h.ctx, h.assetSpec, event1,
+	)
+	require.NoError(t, err)
+
+	// Verify state machine is now in UpdatesPendingState.
+	h.assertCurrentStateIs(&supplycommit.UpdatesPendingState{})
+
+	// Verify a pending transition exists and is in initial state.
+	dbTransition1 := h.assertPendingTransitionExists()
+	h.assertTransitionInitialState(dbTransition1)
+
+	// There should be a pending update now, and after we read it from disk
+	// it should exactly match what we inserted.
+	h.assertPendingUpdates(insertedEvents)
+
+	// Next, we'll insert an update of a new type.
+	event2 := h.randBurnEvent()
+	insertedEvents = append(insertedEvents, event2)
+	err = h.commitMachine.InsertPendingUpdate(h.ctx, h.assetSpec, event2)
+	require.NoError(t, err)
+
+	// Verify state is still UpdatesPendingState.
+	h.assertCurrentStateIs(&supplycommit.UpdatesPendingState{})
+
+	// Verify it uses the same transition.
+	dbTransition2 := h.assertPendingTransitionExists()
+	require.Equal(t, dbTransition1.TransitionID, dbTransition2.TransitionID)
+
+	// Verify two events are now stored.
+	h.assertPendingUpdates(insertedEvents)
+
+	// Next, we'll insert an update of yet another type.
+	event3 := h.randIgnoreEvent()
+	insertedEvents = append(insertedEvents, event3)
+	err = h.commitMachine.InsertPendingUpdate(h.ctx, h.assetSpec, event3)
+	require.NoError(t, err)
+
+	// Once again the state should be the same, and the transition ID should
+	// match.
+	h.assertCurrentStateIs(&supplycommit.UpdatesPendingState{})
+	dbTransition3 := h.assertPendingTransitionExists()
+	require.Equal(t, dbTransition1.TransitionID, dbTransition3.TransitionID)
+
+	// Verify three events are now stored.
+	h.assertPendingUpdates(insertedEvents)
+
+	// Next, let's test an error path: we'll set the state to broadcast,
+	// then attempt to insert a new event.
+ err = h.commitMachine.CommitState( + h.ctx, h.assetSpec, &supplycommit.CommitBroadcastState{}, + ) + require.NoError(t, err) + h.assertCurrentStateIs(&supplycommit.CommitBroadcastState{}) + + // Attempting to insert now should fail. + event4 := h.randMintEvent() + err = h.commitMachine.InsertPendingUpdate(h.ctx, h.assetSpec, event4) + require.Error(t, err) + require.ErrorContains( + t, err, "cannot insert pending update "+ + "in state: CommitBroadcastState", + ) + + // Verify no new event was added. + h.assertPendingUpdates(insertedEvents) +} + +// TestSupplyCommitInsertSignedCommitTx tests associating a signed commit tx +// with a transition. +func TestSupplyCommitInsertSignedCommitTx(t *testing.T) { + t.Parallel() + + h := newSupplyCommitTestHarness(t) + + // First, insert a pending update to create the initial transition + // record. + event1 := h.randMintEvent() + err := h.commitMachine.InsertPendingUpdate( + h.ctx, h.assetSpec, event1, + ) + require.NoError(t, err) + + // Verify the transition exists but has no commit TX linked yet. + dbTransition := h.assertPendingTransitionExists() + require.False(t, dbTransition.PendingCommitTxnID.Valid) + + // Create a _first_ dummy commit TX, insert it into chain_txns, and + // manually link it to the transition. This simulates the state after + // funding but before signing/finalizing. + commitTx1 := randTx(t, 1) + commitTxid1 := commitTx1.TxHash() + commitRawTx1, err := encodeTx(commitTx1) + require.NoError(t, err) + chainTxID1, err := h.db.UpsertChainTx(h.ctx, sqlc.UpsertChainTxParams{ + Txid: commitTxid1[:], + RawTx: commitRawTx1, + }) + require.NoError(t, err) + h.linkTxToPendingTransition(chainTxID1) + + // Now, create the _second_ (final, signed) commit TX. + commitTx2 := randTx(t, 1) + commitTxid2 := commitTx2.TxHash() + + // Insert the signed commitment with the updated transaction. + internalKey := test.RandPubKey(t) + outputKey := test.RandPubKey(t) + commitDetails := supplycommit.SupplyCommitTxn{ + Txn: commitTx2, + InternalKey: internalKey, + OutputKey: outputKey, + OutputIndex: 1, + } + err = h.commitMachine.InsertSignedCommitTx( + h.ctx, h.assetSpec, commitDetails, + ) + require.NoError(t, err) + + // Verify the state machine transitioned to CommitBroadcastState. + h.assertCurrentStateIs(&supplycommit.CommitBroadcastState{}) + + // Verify the transition record now points to the DB ID of the second + // (final) commit TX and the new commitment ID. + dbTransition = h.assertPendingTransitionExists() + require.True(t, dbTransition.PendingCommitTxnID.Valid) + require.True(t, dbTransition.NewCommitmentID.Valid) + newCommitmentID := dbTransition.NewCommitmentID.Int64 + + // Fetch the chain_txns record for the second commit TX to get its ID. + chainTx2Record, err := h.db.FetchChainTx( + h.ctx, commitTxid2[:], + ) + require.NoError(t, err) + + // Assert that the transition points to the correct chain tx ID. + require.Equal( + t, chainTx2Record.TxnID, dbTransition.PendingCommitTxnID.Int64, + ) + + // Assert that a new commitment record was inserted. 
+	newDbCommitment, err := h.fetchCommitmentByID(newCommitmentID)
+	require.NoError(t, err)
+	require.Equal(t, chainTx2Record.TxnID, newDbCommitment.ChainTxnID)
+	require.Equal(t,
+		internalKey.SerializeCompressed(),
+		h.fetchInternalKeyByID(newDbCommitment.InternalKeyID).RawKey,
+	)
+	require.Equal(
+		t, outputKey.SerializeCompressed(), newDbCommitment.OutputKey,
+	)
+	require.Equal(
+		t, int(commitDetails.OutputIndex),
+		int(newDbCommitment.OutputIndex.Int32),
+	)
+
+	// Use FetchState to verify the NewCommitment field is populated
+	// correctly in the returned transition object.
+	_, fetchedTransitionOpt, err := h.commitMachine.FetchState(
+		h.ctx, h.assetSpec,
+	)
+	require.NoError(t, err)
+	fetchedTransition := fetchedTransitionOpt.UnwrapOrFail(t)
+	require.NotNil(t, fetchedTransition.NewCommitment.Txn)
+	require.Equal(
+		t, commitTxid2, fetchedTransition.NewCommitment.Txn.TxHash(),
+	)
+	require.Equal(
+		t, internalKey.SerializeCompressed(),
+		fetchedTransition.NewCommitment.InternalKey.SerializeCompressed(), //nolint:lll
+	)
+	require.Equal(
+		t, outputKey.SerializeCompressed(),
+		fetchedTransition.NewCommitment.OutputKey.SerializeCompressed(),
+	)
+}
+
+// TestSupplyCommitState tests committing different state machine states.
+func TestSupplyCommitState(t *testing.T) {
+	t.Parallel()
+
+	h := newSupplyCommitTestHarness(t)
+
+	// To start with, we'll insert a new state machine on disk.
+	h.addTestStateMachine(sql.NullInt64{})
+
+	allStates := []supplycommit.State{
+		&supplycommit.DefaultState{},
+		&supplycommit.UpdatesPendingState{},
+		&supplycommit.CommitTreeCreateState{},
+		&supplycommit.CommitTxCreateState{},
+		&supplycommit.CommitTxSignState{},
+		&supplycommit.CommitBroadcastState{},
+		&supplycommit.CommitFinalizeState{},
+	}
+
+	// We'll now run through all the states, and make sure that when we
+	// commit a new state, we can read it back out properly to ensure that
+	// it has been committed.
+	for _, targetState := range allStates {
+		t.Run(targetState.String(), func(t *testing.T) {
+			err := h.commitMachine.CommitState(
+				h.ctx, h.assetSpec, targetState,
+			)
+			require.NoError(t, err)
+
+			// Verify via the public API that the state was
+			// committed.
+			fetchedState, _, err := h.commitMachine.FetchState(
+				h.ctx, h.assetSpec,
+			)
+			require.NoError(t, err)
+			require.Equal(
+				t, targetState.String(), fetchedState.String(),
+			)
+
+			// We'll also fetch via the direct SQL API to verify
+			// that things have been committed properly.
+			stateMachineRow, err := h.fetchStateMachine()
+			require.NoError(t, err)
+			expectedStateID, err := stateToInt(targetState)
+			require.NoError(t, err)
+			require.Equal(
+				t, expectedStateID,
+				stateMachineRow.CurrentStateID,
+			)
+		})
+	}
+}
+
+// TestSupplyCommitFetchState tests fetching the state machine state and
+// transition details.
+func TestSupplyCommitFetchState(t *testing.T) {
+	t.Parallel()
+	h := newSupplyCommitTestHarness(t)
+
+	// If no state machine exists, then just the default state should be
+	// returned.
+	state, transitionOpt, err := h.commitMachine.FetchState(
+		h.ctx, h.assetSpec,
+	)
+	require.NoError(t, err)
+	require.IsType(t, &supplycommit.DefaultState{}, state)
+	require.False(t, transitionOpt.IsSome())
+
+	// Now, we'll create a state machine, then query for the state again. We
+	// should still get the default state, but no transition should exist
+	// yet.
+	h.addTestStateMachine(sql.NullInt64{})
+	state, transitionOpt, err = h.commitMachine.FetchState(
+		h.ctx, h.assetSpec,
+	)
+	require.NoError(t, err)
+	require.IsType(t, &supplycommit.DefaultState{}, state)
+	require.False(t, transitionOpt.IsSome())
+
+	// Next, we'll make a pending update, then query for the state again. We
+	// should also be able to query for the transition inserted on disk.
+	event1 := h.randMintEvent()
+	err = h.commitMachine.InsertPendingUpdate(
+		h.ctx, h.assetSpec, event1,
+	)
+	require.NoError(t, err)
+	_, err = h.fetchPendingTransition()
+	require.NoError(t, err)
+
+	// Now that we've inserted the pending update, we'll query the state
+	// again; it should have transitioned.
+	state, transitionOpt, err = h.commitMachine.FetchState(
+		h.ctx, h.assetSpec,
+	)
+	require.NoError(t, err)
+	require.IsType(t, &supplycommit.UpdatesPendingState{}, state)
+	require.True(t, transitionOpt.IsSome())
+	transition := transitionOpt.UnwrapOrFail(t)
+	require.False(t, transition.OldCommitment.IsSome())
+	require.Len(t, transition.PendingUpdates, 1)
+	require.Nil(t, transition.NewCommitment.Txn)
+	require.False(t, transition.ChainProof.IsSome())
+
+	// Next, we'll insert a signed commitment transaction.
+	commitTx := randTx(t, 1)
+	internalKey := test.RandPubKey(t)
+	outputKey := test.RandPubKey(t)
+
+	commitDetails := supplycommit.SupplyCommitTxn{
+		Txn:         commitTx,
+		InternalKey: internalKey,
+		OutputKey:   outputKey,
+		OutputIndex: 1,
+	}
+	err = h.commitMachine.InsertSignedCommitTx(
+		h.ctx, h.assetSpec, commitDetails,
+	)
+	require.NoError(t, err)
+
+	// After the commitment transaction was inserted, the state should be
+	// updated, and the pending commitment accounted for.
+	state, transitionOpt, err = h.commitMachine.FetchState(
+		h.ctx, h.assetSpec,
+	)
+	require.NoError(t, err)
+	require.IsType(t, &supplycommit.CommitBroadcastState{}, state)
+	require.True(t, transitionOpt.IsSome())
+	transition = transitionOpt.UnwrapOrFail(t)
+	require.False(t, transition.OldCommitment.IsSome())
+	require.Len(t, transition.PendingUpdates, 1)
+	require.NotNil(t, transition.NewCommitment.Txn)
+	require.Equal(
+		t, commitTx.TxHash(), transition.NewCommitment.Txn.TxHash(),
+	)
+	require.False(t, transition.ChainProof.IsSome())
+
+	// Next, we'll simulate the commitment transaction confirming
+	// on-chain.
+	updatedTransition, err := h.fetchPendingTransition()
+	require.NoError(t, err)
+	require.True(t, updatedTransition.PendingCommitTxnID.Valid)
+	commitTxID := updatedTransition.PendingCommitTxnID.Int64
+	commitTxRow, err := h.fetchChainTxByID(commitTxID)
+	require.NoError(t, err)
+	h.confirmChainTx(
+		commitTxID, lnutils.ByteSlice(commitTx.TxHash()),
+		commitTxRow.RawTx,
+	)
+
+	// As a final step, we'll modify the state machine to finalize the
+	// transition, and go back to the default state.
+	writeTx := WriteTxOption()
+	err = h.commitMachine.db.ExecTx(h.ctx, writeTx, func(db SupplyCommitStore) error { //nolint:lll
+		_, err := db.UpsertSupplyCommitStateMachine(
+			h.ctx, SupplyCommitMachineParams{
+				GroupKey:           h.groupKeyBytes,
+				LatestCommitmentID: updatedTransition.NewCommitmentID, //nolint:lll
+				StateName:          sqlStr("DefaultState"),
+			})
+		if err != nil {
+			return err
+		}
+		return db.FinalizeSupplyCommitTransition(
+			h.ctx, updatedTransition.TransitionID,
+		)
+	},
+	)
+	require.NoError(t, err)
+
+	// Now that the transition is finalized, the state should be default,
+	// and no state transition should be found (it's now finalized).
+	state, transitionOpt, err = h.commitMachine.FetchState(
+		h.ctx, h.assetSpec,
+	)
+	require.NoError(t, err)
+	require.IsType(t, &supplycommit.DefaultState{}, state)
+	require.False(t, transitionOpt.IsSome())
+}
+
+// TestSupplyCommitApplyStateTransition tests the full state transition
+// application using the public StateMachineStore interface methods via the test
+// harness.
+func TestSupplyCommitApplyStateTransition(t *testing.T) {
+	t.Parallel()
+
+	h := newSupplyCommitTestHarness(t)
+
+	// To kick off our test, we'll perform a single state transition. This
+	// entails: adding a set of pending updates, committing the signed
+	// commit tx, and finally applying the state transition. After
+	// application, we should find that the transition is now final, the
+	// state machine points to the latest commitment, and all the supply
+	// trees have been updated.
+	updates1 := []supplycommit.SupplyUpdateEvent{
+		h.randMintEvent(), h.randBurnEvent(),
+	}
+	stateTransition1 := h.performSingleTransition(updates1)
+	h.assertTransitionApplied(stateTransition1)
+
+	// To ensure that we can perform multiple transitions, we'll now do
+	// another one, with a new set of events, and then assert that it's been
+	// applied properly.
+	updates2 := []supplycommit.SupplyUpdateEvent{
+		h.randMintEvent(), h.randIgnoreEvent(),
+	}
+	stateTransition2 := h.performSingleTransition(updates2)
+	h.assertTransitionApplied(stateTransition2)
+}
+
+// TestSupplyCommitUnspentPrecommits tests the UnspentPrecommits method.
+func TestSupplyCommitUnspentPrecommits(t *testing.T) {
+	t.Parallel()
+
+	h := newSupplyCommitTestHarness(t)
+
+	// Create a spec specific to this test, based on the harness group key
+	// but with a random asset ID.
+	spec := asset.NewSpecifierOptionalGroupPubKey(
+		asset.RandID(t), h.groupPubKey,
+	)
+
+	// To start with, we shouldn't have any precommits.
+	precommitsRes := h.commitMachine.UnspentPrecommits(h.ctx, spec)
+	precommits, err := precommitsRes.Unpack()
+	require.NoError(t, err)
+	require.Empty(t, precommits)
+
+	// Next, we'll add a new minting batch, and a pre-commit along with it.
+	batchKeyBytes, _, mintTx1, _, _ := h.addTestMintingBatch()
+	_ = h.addTestMintAnchorUniCommitment(batchKeyBytes, sql.NullInt64{})
+
+	// At this point, we should find a single pre-commitment on disk.
+	precommitsRes = h.commitMachine.UnspentPrecommits(h.ctx, spec)
+	precommits, err = precommitsRes.Unpack()
+	require.NoError(t, err)
+	require.Len(t, precommits, 1)
+	require.Equal(t, mintTx1.TxHash(), precommits[0].MintingTxn.TxHash())
+
+	// Next, we'll add another pre-commitment, and this time mark it as
+	// spent by a supply commitment.
+	//nolint:lll
+	batchKeyBytes, commitTxDbID2, _, commitTxid2, commitRawTx2 :=
+		h.addTestMintingBatch()
+	commitID2 := h.addTestSupplyCommitment(
+		commitTxDbID2, commitTxid2, commitRawTx2, false,
+	)
+	_ = h.addTestMintAnchorUniCommitment(batchKeyBytes, sqlInt64(commitID2))
+
+	// We should now find two pre-commitments.
+	precommitsRes = h.commitMachine.UnspentPrecommits(h.ctx, spec)
+	precommits, err = precommitsRes.Unpack()
+	require.NoError(t, err)
+	require.Len(t, precommits, 2)
+
+	// Next, we'll confirm the transaction associated with the second
+	// pre-commitment spend.
+	blockHash := test.RandBytes(32)
+	blockHeight := sqlInt32(123)
+	txIndex := sqlInt32(1)
+	_, err = h.db.UpsertChainTx(h.ctx, sqlc.UpsertChainTxParams{
+		Txid:        commitTxid2,
+		RawTx:       commitRawTx2,
+		ChainFees:   0,
+		BlockHash:   blockHash,
+		BlockHeight: blockHeight,
+		TxIndex:     txIndex,
+	})
+	require.NoError(t, err)
+
+	// As the transaction was confirmed above, we should now only have a
+	// single pre-commitment on disk.
+	precommitsRes = h.commitMachine.UnspentPrecommits(h.ctx, spec)
+	precommits, err = precommitsRes.Unpack()
+	require.NoError(t, err)
+	require.Len(t, precommits, 1)
+
+	// If we pick a new random public key, then we shouldn't be able to find
+	// any pre-commitments for it.
+	otherGroupKey := test.RandPubKey(t)
+	otherSpec := asset.NewSpecifierOptionalGroupPubKey(
+		asset.RandID(t), otherGroupKey,
+	)
+	precommitsRes = h.commitMachine.UnspentPrecommits(h.ctx, otherSpec)
+	precommits, err = precommitsRes.Unpack()
+	require.NoError(t, err)
+	require.Empty(t, precommits)
+
+	// Finally, trying with a missing group key should yield an error.
+	emptySpec := asset.NewSpecifierOptionalGroupKey(asset.RandID(t), nil)
+	precommitsRes = h.commitMachine.UnspentPrecommits(h.ctx, emptySpec)
+	require.ErrorIs(t, precommitsRes.Err(), ErrMissingGroupKey)
+}
+
+// TestSupplyCommitMachineFetch tests the SupplyCommit method.
+func TestSupplyCommitMachineFetch(t *testing.T) {
+	t.Parallel()
+
+	h := newSupplyCommitTestHarness(t)
+
+	// Use a spec specific to this test, based on the harness group key but
+	// with a random asset ID.
+	spec := asset.NewSpecifierOptionalGroupPubKey(
+		asset.RandID(t), h.groupPubKey,
+	)
+
+	// At the very start, we shouldn't have any commitments at all for this
+	// spec.
+	commitRes := h.commitMachine.SupplyCommit(h.ctx, spec)
+	commitOpt, err := commitRes.Unpack()
+	require.NoError(t, err)
+	require.True(t, commitOpt.IsNone())
+
+	// If we add a state machine, then we should still find no commitments.
+	h.addTestStateMachine(sql.NullInt64{})
+	commitRes = h.commitMachine.SupplyCommit(h.ctx, spec)
+	commitOpt, err = commitRes.Unpack()
+	require.NoError(t, err)
+	require.True(t, commitOpt.IsNone())
+
+	// Next, we'll add a new supply commitment, and also a state machine to
+	// go along with it, which we'll link to the commitment.
+	//nolint:lll
+	_, commitTxDbID1, commitTx1, commitTxid1, commitRawTx1 :=
+		h.addTestMintingBatch()
+	commitID1 := h.addTestSupplyCommitment(
+		commitTxDbID1, commitTxid1, commitRawTx1, false,
+	)
+	h.addTestStateMachine(sqlInt64(commitID1))
+
+	// At this point, as the above commitment isn't confirmed yet, the
+	// default query should still find no commitments.
+	commitRes = h.commitMachine.SupplyCommit(h.ctx, spec)
+	commitOpt, err = commitRes.Unpack()
+	require.NoError(t, err)
+	require.True(t, commitOpt.IsNone())
+
+	// If we now confirm the commitment we created, then we should find the
+	// supply commitment.
+	blockHash := test.RandBytes(32)
+	blockHeight := sqlInt32(123)
+	txIndex := sqlInt32(1)
+	_, err = h.db.UpsertChainTx(h.ctx, sqlc.UpsertChainTxParams{
+		Txid:        commitTxid1,
+		RawTx:       commitRawTx1,
+		ChainFees:   0,
+		BlockHash:   blockHash,
+		BlockHeight: blockHeight,
+		TxIndex:     txIndex,
+	})
+	require.NoError(t, err)
+
+	// Now that we've confirmed the commitment above, we should be able to
+	// find it.
+	commitRes = h.commitMachine.SupplyCommit(h.ctx, spec)
+	commitOpt, err = commitRes.Unpack()
+	require.NoError(t, err)
+	require.False(t, commitOpt.IsNone())
+
+	// Fetch the commitment details directly for comparison.
+	var dbCommit sqlc.SupplyCommitment
+	readTx := ReadTxOption()
+	err = h.commitMachine.db.ExecTx(
+		h.ctx, readTx, func(dbtx SupplyCommitStore) error {
+			var txErr error
+			dbCommit, txErr = dbtx.QuerySupplyCommitment(
+				h.ctx, commitID1,
+			)
+			return txErr
+		},
+	)
+	require.NoError(t, err)
+
+	// We'll now assert that the populated commitment we just read matches
+	// what we have on disk.
+	rootCommit := commitOpt.UnwrapOrFail(t)
+	require.Equal(t, commitTx1.TxHash(), rootCommit.Txn.TxHash())
+	require.Equal(
+		t, uint32(dbCommit.OutputIndex.Int32), rootCommit.TxOutIdx,
+	)
+
+	dbInternalKeyRow, err := h.db.FetchInternalKeyByID(
+		h.ctx, dbCommit.InternalKeyID,
+	)
+	require.NoError(t, err)
+	require.Equal(
+		t, dbInternalKeyRow.RawKey,
+		rootCommit.InternalKey.SerializeCompressed(),
+	)
+	require.Equal(
+		t, dbCommit.OutputKey,
+		rootCommit.OutputKey.SerializeCompressed(),
+	)
+
+	// Verify the root constructed from stored hash/sum.
+	require.NotNil(t, rootCommit.SupplyRoot)
+	require.Equal(
+		t, dbCommit.SupplyRootHash,
+		lnutils.ByteSlice(rootCommit.SupplyRoot.NodeHash()),
+	)
+	require.Equal(
+		t, dbCommit.SupplyRootSum.Int64,
+		int64(rootCommit.SupplyRoot.NodeSum()),
+	)
+
+	// Finally, a spec without a group key should yield a missing group key
+	// error.
+	emptySpec := asset.NewSpecifierOptionalGroupKey(asset.RandID(t), nil)
+	commitRes = h.commitMachine.SupplyCommit(h.ctx, emptySpec)
+	require.ErrorIs(t, commitRes.Err(), ErrMissingGroupKey)
+}
+
+// randTx creates a random transaction for testing purposes.
+func randTx(t *testing.T, numOutputs int) *wire.MsgTx {
+	t.Helper()
+
+	tx := wire.NewMsgTx(2)
+	tx.AddTxIn(&wire.TxIn{
+		PreviousOutPoint: test.RandOp(t),
+	})
+
+	for i := 0; i < numOutputs; i++ {
+		tx.AddTxOut(&wire.TxOut{
+			Value:    1000,
+			PkScript: test.RandBytes(22),
+		})
+	}
+
+	return tx
+}
+
+// TestSupplyCommitMultipleSupplyCommitments tests that multiple rows can be
+// inserted into the supply_commitments table without violating foreign key
+// constraints related to SMT root namespaces, reusing the existing test setup
+// helper.
+func TestSupplyCommitMultipleSupplyCommitments(t *testing.T) {
+	t.Parallel()
+
+	h := newSupplyCommitTestHarness(t)
+
+	// genTxData generates unique transaction data for each commitment.
+	genTxData := func() (int64, []byte, []byte) {
+		genesisPoint := test.RandOp(h.t)
+		tx := wire.NewMsgTx(2)
+		tx.AddTxIn(&wire.TxIn{
+			PreviousOutPoint: genesisPoint,
+		})
+		tx.AddTxOut(&wire.TxOut{
+			Value:    1000,
+			PkScript: test.RandBytes(20),
+		})
+
+		txBytes, err := encodeTx(tx)
+		require.NoError(h.t, err)
+		txid := tx.TxHash()
+		chainTxID, err := h.db.UpsertChainTx(
+			h.ctx, sqlc.UpsertChainTxParams{
+				Txid:  txid[:],
+				RawTx: txBytes,
+			},
+		)
+		require.NoError(h.t, err)
+		return chainTxID, txid[:], txBytes
+	}
+
+	// Insert the first commitment using the harness method.
+	chainTxID1, txid1, rawTx1 := genTxData()
+	_ = h.addTestSupplyCommitment(
+		chainTxID1, txid1, rawTx1, false,
+	)
+
+	// Insert the second commitment with the same group key, but distinct
+	// data, using the harness method.
+	chainTxID2, txid2, rawTx2 := genTxData()
+	_ = h.addTestSupplyCommitment(
+		chainTxID2, txid2, rawTx2, false,
+	)
+
+	// If we reached here without errors during the addTestSupplyCommitment
+	// calls (which includes InsertSupplyCommitment), the test passes.
+}
+
+// encodeTx serializes the given transaction into a raw byte slice.
+func encodeTx(tx *wire.MsgTx) ([]byte, error) {
+	var buf bytes.Buffer
+	err := tx.Serialize(&buf)
+	return buf.Bytes(), err
+}

From 277c8f377f1f4b0cb78e0f3069160a5f347b3a24 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Fri, 2 May 2025 20:56:30 -0700
Subject: [PATCH 8/8] tapdb: add supply_commit.md documentation

---
 tapdb/supply_commit.md | 190 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 190 insertions(+)
 create mode 100644 tapdb/supply_commit.md

diff --git a/tapdb/supply_commit.md b/tapdb/supply_commit.md
new file mode 100644
index 000000000..661fdf39c
--- /dev/null
+++ b/tapdb/supply_commit.md
@@ -0,0 +1,190 @@
+# Taproot Asset Supply Commitment Persistence
+
+This document details the database schema and Go implementation (`tapdb/supply_commit.go`) that together provide persistent storage for the Taproot Asset Supply Commitment State Machine (`universe/supplycommit`). This layer ensures the state machine's progress is durable across restarts and provides mechanisms for tracking historical and pending supply commitments.
+
+## Purpose
+
+The primary function of this persistence layer is to reliably store the state and associated data for the `universe/supplycommit` state machine. This state machine manages the process of creating and anchoring cryptographic commitments to the supply changes (mints, burns, ignores) of a specific asset group onto the Bitcoin blockchain.
+
+This involves storing:
+
+1. The current operational state of the state machine for each asset group.
+2. Details of past, successfully confirmed supply commitments.
+3. Information about pending state transitions, including the specific supply update events being processed.
+4. Details of the Bitcoin transactions used to anchor commitments.
+
+## Schema Overview
+
+Migration `000040_asset_commit.up.sql` introduces several tables to manage the state machine's lifecycle and data.
+
+1. **`supply_commit_states`**: An enum-like table defining the possible states of the state machine (e.g., `DefaultState`, `UpdatesPendingState`, `CommitTxCreateState`).
+    * `id` (INTEGER PK): Numeric ID for the state.
+    * `state_name` (TEXT UNIQUE): Human-readable name of the state.
+
+2. **`supply_commit_update_types`**: An enum-like table defining the types of supply updates (e.g., `mint`, `burn`, `ignore`).
+    * `id` (INTEGER PK): Numeric ID for the update type.
+    * `update_type_name` (TEXT UNIQUE): Human-readable name of the update type.
+
+3. **`supply_commitments`**: Stores the details of a specific, potentially confirmed, supply commitment anchored on-chain.
+    * `commit_id` (INTEGER PK): Unique identifier for this commitment instance.
+    * `group_key` (BLOB): The tweaked group key identifying the asset group.
+    * `chain_txn_id` (BIGINT FK -> `chain_txns.txn_id`): Reference to the Bitcoin transaction containing this commitment.
+    * `output_index` (INTEGER): The output index within the `chain_txn_id` transaction.
+    * `internal_key_id` (BIGINT FK -> `internal_keys.key_id`): The internal key used for the commitment output's Taproot derivation.
+    * `output_key` (BLOB): The final tweaked Taproot output key.
+    * `block_header` (BLOB): Header of the block confirming the commitment (NULL if unconfirmed).
+    * `block_height` (INTEGER): Height of the confirming block (NULL if unconfirmed).
+    * `merkle_proof` (BLOB): Merkle proof of the transaction's inclusion in the block (NULL if unconfirmed).
+    * `supply_root_hash` (BLOB): The MS-SMT root hash of the supply tree at this commitment (NULL until finalized).
+ * `supply_root_sum` (BIGINT): The MS-SMT root sum (total supply value) at this commitment (NULL until finalized). + +4. **`supply_commit_state_machines`**: Tracks the current state for each asset group's state machine instance. + * `group_key` (BLOB PK): The tweaked group key, uniquely identifying the state machine instance. + * `current_state_id` (INTEGER FK -> `supply_commit_states.id`): The current operational state. + * `latest_commitment_id` (BIGINT FK -> `supply_commitments.commit_id`): Reference to the most recently *finalized* and confirmed commitment (NULL if none). + +5. **`supply_commit_transitions`**: Records an active attempt to transition the supply state. Acts like a Write-Ahead Log (WAL) entry for the state machine. + * `transition_id` (INTEGER PK): Unique identifier for this transition attempt. + * `state_machine_group_key` (BLOB FK -> `supply_commit_state_machines.group_key`): Links back to the state machine instance. + * `old_commitment_id` (BIGINT FK -> `supply_commitments.commit_id`): The commitment being replaced (NULL for the first commitment). + * `new_commitment_id` (BIGINT FK -> `supply_commitments.commit_id`): The new commitment being created by this transition (NULL initially). + * `pending_commit_txn_id` (BIGINT FK -> `chain_txns.txn_id`): The Bitcoin transaction intended to confirm this transition (NULL until created/signed). + * `finalized` (BOOLEAN): Indicates if the transition completed successfully and was applied. Defaults to `FALSE`. + * `creation_time` (TIMESTAMP): When the transition was initiated. + * `UNIQUE INDEX ... WHERE finalized = 0`: Crucially ensures only *one* non-finalized (pending) transition can exist per `state_machine_group_key` at any time. + +6. **`supply_update_events`**: Stores the individual mint, burn, or ignore events associated with a *pending* transition. + * `event_id` (INTEGER PK): Unique identifier for the event record. + * `transition_id` (BIGINT FK -> `supply_commit_transitions.transition_id` ON DELETE CASCADE): Links to the parent transition. Cascade delete ensures events are cleaned up if a transition is aborted/deleted. + * `update_type_id` (INTEGER FK -> `supply_commit_update_types.id`): Type of the update event. + * `event_data` (BLOB): Serialized data specific to the event type (e.g., `NewMintEvent`, `NewBurnEvent`). + +7. **`ALTER TABLE mint_anchor_uni_commitments`**: Adds a `spent_by` column (FK -> `supply_commitments.commit_id`) to track which supply commitment transaction spent a given minting pre-commitment output. 
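+
+To make the single-pending-transition guarantee concrete, the sketch below condenses the relevant DDL. It is illustrative only and assumes the column layout described above; the authoritative definitions live in the `000040_asset_commit.up.sql` migration.
+
+```sql
+-- Condensed, illustrative sketch only; see the 000040_asset_commit.up.sql
+-- migration for the authoritative schema.
+CREATE TABLE supply_commit_transitions (
+    transition_id INTEGER PRIMARY KEY,
+    state_machine_group_key BLOB NOT NULL
+        REFERENCES supply_commit_state_machines(group_key),
+    old_commitment_id BIGINT REFERENCES supply_commitments(commit_id),
+    new_commitment_id BIGINT REFERENCES supply_commitments(commit_id),
+    pending_commit_txn_id BIGINT REFERENCES chain_txns(txn_id),
+    finalized BOOLEAN NOT NULL DEFAULT FALSE,
+    creation_time TIMESTAMP NOT NULL
+);
+
+-- At most one non-finalized (pending) transition may exist per asset group.
+CREATE UNIQUE INDEX supply_commit_transitions_single_pending_idx
+    ON supply_commit_transitions (state_machine_group_key)
+    WHERE finalized = FALSE;
+```
+
+Because the index only covers rows with `finalized = FALSE`, any number of historical (finalized) transitions can accumulate per group, while at most one in-flight transition is ever allowed; new update events therefore attach to the existing pending transition rather than opening a second one.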
+ +### Schema Relationships (Mermaid Diagram) + +```mermaid +erDiagram + supply_commit_state_machines ||--o{ supply_commit_transitions : "has_pending" + supply_commit_state_machines }|--|| supply_commit_states : "uses_state" + supply_commit_state_machines }|--|| supply_commitments : "tracks_latest" + supply_commit_transitions ||--o{ supply_update_events : "includes_events" + supply_commit_transitions }|--|| supply_commitments : "replaces_old" + supply_commit_transitions }|--|| supply_commitments : "creates_new" + supply_commit_transitions }|--|| chain_txns : "uses_pending_tx" + supply_update_events ||--|| supply_commit_update_types : "uses_type" + supply_commitments ||--|| chain_txns : "included_in_tx" + supply_commitments ||--|| internal_keys : "uses_internal_key" + mint_anchor_uni_commitments }|--|| supply_commitments : "spent_by_commit" + + supply_commit_states { + INTEGER id PK + TEXT state_name UK + } + + supply_commit_update_types { + INTEGER id PK + TEXT update_type_name UK + } + + supply_commitments { + INTEGER commit_id PK + BLOB group_key "Idx" + BIGINT chain_txn_id FK + INTEGER output_index + BIGINT internal_key_id FK + BLOB output_key + BLOB supply_root_hash + BIGINT supply_root_sum + } + + supply_commit_state_machines { + BLOB group_key PK + INTEGER current_state_id FK + BIGINT latest_commitment_id FK + } + + supply_commit_transitions { + INTEGER transition_id PK + BLOB state_machine_group_key FK + BIGINT old_commitment_id FK + BIGINT new_commitment_id FK + BIGINT pending_commit_txn_id FK + BOOLEAN finalized "Default=0" + TIMESTAMP creation_time + } + + supply_update_events { + INTEGER event_id PK + BIGINT transition_id FK + INTEGER update_type_id FK + BLOB event_data + } + + chain_txns { + INTEGER txn_id PK + BLOB txid UK + BLOB raw_tx + INTEGER block_height + BLOB block_hash + INTEGER tx_index + } + + internal_keys { + INTEGER key_id PK + BLOB raw_key UK + } + + mint_anchor_uni_commitments { + INTEGER id PK + BIGINT batch_id FK + INTEGER tx_output_index + BLOB taproot_internal_key + BLOB group_key + BIGINT spent_by FK + } +``` + +## State Machine Persistence Logic + +The database tables work together to provide durable storage and a recovery mechanism for the supply commitment state machine. + +* **Current State:** The `supply_commit_state_machines` table acts as the primary indicator of the current operational state (`current_state_id`) for a given asset group (`group_key`). It also points to the `latest_commitment_id` that has been successfully processed and confirmed. + +* **Pending Transitions (WAL):** The `supply_commit_transitions` table is key to the persistence strategy. Due to the unique index `supply_commit_transitions_single_pending_idx` (on `state_machine_group_key` WHERE `finalized = 0`), only one *active* transition can exist per asset group. This record acts like a Write-Ahead Log (WAL) entry: + * It captures the *intent* to move from `old_commitment_id` to `new_commitment_id`. + * It aggregates all necessary information for the transition *before* it's fully finalized. + * The `supply_update_events` associated with this transition (linked via `transition_id`) store the specific data (mints, burns, ignores) driving the change. + * References to the `new_commitment_id` (in `supply_commitments`) and `pending_commit_txn_id` (in `chain_txns`) are added as the state machine progresses through transaction creation and signing. 
+ +* **Commitment Data:** The `supply_commitments` table stores the immutable details of *each* commitment attempt once it reaches the stage of having a potential on-chain transaction. Initially, confirmation details (`block_height`, `merkle_proof`, etc.) and the final SMT root (`supply_root_hash`, `supply_root_sum`) are NULL. + +* **Lifecycle & Recovery:** + 1. **Initiation (`InsertPendingUpdate`):** When the first `SupplyUpdateEvent` arrives for an idle state machine, a new row is inserted into `supply_commit_transitions` (`finalized=0`), and the event is stored in `supply_update_events`. The state machine's state in `supply_commit_state_machines` is set to `UpdatesPendingState`. Subsequent events for this group add more rows to `supply_update_events` linked to the *same* pending transition. + 2. **Transaction Creation/Signing (`InsertSignedCommitTx`):** When the state machine creates and signs the commitment transaction, a new row is added to `chain_txns`, a new row is added to `supply_commitments` (with NULL confirmation/root details), and the *pending* `supply_commit_transitions` row is updated with `new_commitment_id` and `pending_commit_txn_id`. The state machine's state moves to `CommitBroadcastState`. + 3. **Confirmation & Finalization (`ApplyStateTransition`):** Once the transaction (`pending_commit_txn_id`) confirms: + * The corresponding `supply_commitments` row (`new_commitment_id`) is updated with block details, merkle proof, and the final calculated `supply_root_hash` and `supply_root_sum` (derived by applying the `supply_update_events` to the SMTs via `applySupplyUpdatesInternal`). + * The corresponding `chain_txns` row is updated with block hash/height/index. + * The `supply_commit_transitions` row is marked `finalized = 1`. + * The `supply_commit_state_machines` row is updated: `current_state_id` becomes `DefaultState`, and `latest_commitment_id` is set to the `new_commitment_id` of the just-finalized transition. + 4. **Restart:** Upon restart, the system queries `supply_commit_state_machines` for the current state. If it's not `DefaultState`, it queries `supply_commit_transitions` for the single pending (`finalized=0`) transition and reconstructs the `SupplyStateTransition` object (including fetching associated `supply_update_events` and commitment details) to resume operation from the correct point (e.g., re-broadcasting, waiting for confirmation, or finalizing). + +## Implementation (`tapdb/SupplyCommitMachine`) + +The `tapdb.SupplyCommitMachine` struct implements the `supplycommit.CommitmentTracker` and `supplycommit.StateMachineStore` interfaces, bridging the gap between the abstract state machine logic and the SQL database. + +* **Core:** It embeds a `BatchedSupplyCommitStore`, which provides access to the necessary SQLc queries and transaction management (`ExecTx`). +* **Interface Mapping:** + * `UnspentPrecommits`: Queries `mint_anchor_uni_commitments` filtering by `group_key` and `spent_by IS NULL`. + * `SupplyCommit`: Queries `supply_commit_state_machines` to get `latest_commitment_id` for the group, then queries `supply_commitments` using that ID. + * `InsertPendingUpdate`: Manages the logic described in "Lifecycle & Recovery - Initiation", using `UpsertSupplyCommitStateMachine`, `QueryExistingPendingTransition`, `InsertSupplyCommitTransition`, `InsertSupplyUpdateEvent`. 
+ * `InsertSignedCommitTx`: Implements the logic from "Lifecycle & Recovery - Transaction Creation/Signing", using `QueryPendingSupplyCommitTransition`, `UpsertChainTx`, `UpsertInternalKey`, `InsertSupplyCommitment`, `UpdateSupplyCommitTransitionCommitment`, `UpsertSupplyCommitStateMachine`. + * `CommitState`: Updates `current_state_id` in `supply_commit_state_machines` via `UpsertSupplyCommitStateMachine`. + * `FetchState`: Reconstructs the current state and the pending `SupplyStateTransition` (if any) by querying `supply_commit_state_machines`, `supply_commit_transitions`, `supply_update_events`, `supply_commitments`, `chain_txns`, and `internal_keys`. It handles deserializing event data and reconstructing commitment objects. + * `ApplyStateTransition`: Executes the finalization logic ("Lifecycle & Recovery - Confirmation & Finalization"). It calls `applySupplyUpdatesInternal` (from `supply_tree.go`) to persist SMT changes, then uses `UpdateSupplyCommitmentRoot`, `UpdateSupplyCommitmentChainDetails`, `UpsertChainTx`, `FinalizeSupplyCommitTransition`, and `UpsertSupplyCommitStateMachine` to update the database records accordingly. + +## Atomicity and Recovery + +All multi-step database modifications within `SupplyCommitMachine` methods are wrapped in database transactions using `db.ExecTx`. This ensures that operations like inserting a transition and its first event, or updating commitment details and finalizing the transition, are atomic. + +The persistence of the current state in `supply_commit_state_machines` and the detailed logging of the single pending transition in `supply_commit_transitions` (acting as a WAL) allow the state machine to reliably recover and resume its operation after restarts, preventing duplicate commitments or loss of pending updates.
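+
+As a rough illustration of what that atomicity buys us, the sketch below spells out the logical sequence of statements behind `ApplyStateTransition`. It is illustrative only: the real statements are the sqlc-generated queries named above (`UpdateSupplyCommitmentRoot`, `UpdateSupplyCommitmentChainDetails`, `FinalizeSupplyCommitTransition`, `UpsertSupplyCommitStateMachine`) executed inside a single `db.ExecTx` call, and the named placeholders are assumptions for readability.
+
+```sql
+-- Illustrative sketch of the finalization step; not the literal queries.
+BEGIN;
+
+-- Persist the final SMT root on the new commitment.
+UPDATE supply_commitments
+    SET supply_root_hash = :root_hash, supply_root_sum = :root_sum
+    WHERE commit_id = :new_commitment_id;
+
+-- Record the confirmation details of the anchoring transaction.
+UPDATE supply_commitments
+    SET block_header = :header, block_height = :height,
+        merkle_proof = :proof, output_index = :out_idx
+    WHERE commit_id = :new_commitment_id;
+
+-- Mark the pending transition as complete.
+UPDATE supply_commit_transitions
+    SET finalized = TRUE
+    WHERE transition_id = :transition_id;
+
+-- Reset the state machine and point it at the new commitment.
+UPDATE supply_commit_state_machines
+    SET current_state_id = 0, -- DefaultState
+        latest_commitment_id = :new_commitment_id
+    WHERE group_key = :group_key;
+
+COMMIT;
+```
+
+If the process crashes at any point before the final `COMMIT`, none of these changes become visible: the pending transition remains the WAL entry of record, and `FetchState` reconstructs it on restart so the state machine can resume from exactly where it left off.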