Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 22 additions & 9 deletions chain/sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import (
"github.com/decred/dcrd/blockchain/stake/v5"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/crypto/blake256"
"github.com/decred/dcrd/gcs/v4"
"github.com/decred/dcrd/mixing/mixpool"
"github.com/decred/dcrd/wire"
"github.com/jrick/wsrpc/v2"
Expand Down Expand Up @@ -223,6 +224,11 @@ func (s *Syncer) getHeaders(ctx context.Context) error {
return err
}

birthday, err := s.wallet.BirthState(ctx)
if err != nil {
return err
}

startedSynced := s.walletSynced.Load()

cnet := s.wallet.ChainParams().Net
Expand Down Expand Up @@ -255,15 +261,22 @@ func (s *Syncer) getHeaders(ctx context.Context) error {
g.Go(func() error {
header := headers[i]
hash := header.BlockHash()
filter, proofIndex, proof, err := s.rpc.CFilterV2(ctx, &hash)
if err != nil {
return err
}

err = validate.CFilterV2HeaderCommitment(cnet, header,
filter, proofIndex, proof)
if err != nil {
return err
var filter *gcs.FilterV2
if birthday == nil || birthday.AfterBirthday(header) {
var (
proofIndex uint32
proof []chainhash.Hash
)
filter, proofIndex, proof, err = s.rpc.CFilterV2(ctx, &hash)
if err != nil {
return err
}

err = validate.CFilterV2HeaderCommitment(cnet, header,
filter, proofIndex, proof)
if err != nil {
return err
}
}

nodes[i] = wallet.NewBlockNode(header, &hash, filter, nil)
Expand Down
15 changes: 12 additions & 3 deletions rpc/documentation/api.md
Original file line number Diff line number Diff line change
Expand Up @@ -1109,6 +1109,8 @@ ___
The `ImportPrivateKey` method imports a private key in Wallet Import Format
(WIF) encoding to a wallet account. A rescan may optionally be started to
search for transactions involving the private key's associated payment address.
If the private key is involved in transactions before the wallet birthday
(if one is set), a rescan must be performed to download the missing cfilters.

**Request:** `ImportPrivateKeyRequest`

Expand Down Expand Up @@ -1144,7 +1146,9 @@ ___

The `ImportScript` method imports a script into the wallet. A rescan may
optionally be started to search for transactions involving the script, either
as an output or in a P2SH input.
as an output or in a P2SH input. If the script is involved in transactions
before the wallet birthday (if one is set), a rescan must be performed to
download the missing cfilters.

**Request:** `ImportScriptRequest`

Expand Down Expand Up @@ -1191,7 +1195,9 @@ seed for a hierarchical deterministic private key that is imported into the
wallet with the supplied name and locked with the supplied password. Addresses
derived from this account MUST NOT be sent any funds. They are solely for the
use of creating stake submission scripts. A rescan may optionally be started to
search for tickets using submission scripts derived from this account.
search for tickets using submission scripts derived from this account. If any
tickets would exist before the wallet birthday (if one is set), a rescan must
be performed to download the missing cfilters.

**Request:** `ImportVotingAccountFromSeedRequest`

Expand Down Expand Up @@ -2690,7 +2696,10 @@ or account must be unlocked.
#### `BirthBlock`

The `BirthBlock` method returns the wallet's birthday block, if set. Rescans
should generally be started from after this block. If a birthday is set,
cfilters from before the birthday may not have been downloaded. A rescan from
a height will move the birthday to the rescan height and download all missing
cfilters from that height.

**Request:** `BirthBlockRequest`

Expand Down
8 changes: 8 additions & 0 deletions spv/sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -1662,6 +1662,11 @@ func (s *Syncer) initialSyncHeaders(ctx context.Context) error {
return res
}

birthday, err := s.wallet.BirthState(ctx)
if err != nil {
return err
}

// Stage 1: fetch headers.
headersChan := make(chan *headersBatch)
g.Go(func() error {
Expand Down Expand Up @@ -1737,6 +1742,9 @@ func (s *Syncer) initialSyncHeaders(ctx context.Context) error {
s.sidechainMu.Lock()
var missingCfilter []*wallet.BlockNode
for i := range batch.bestChain {
if birthday != nil && !birthday.AfterBirthday(batch.bestChain[i].Header) {
continue
}
if batch.bestChain[i].FilterV2 == nil {
missingCfilter = batch.bestChain[i:]
break
Expand Down
41 changes: 40 additions & 1 deletion wallet/rescan.go
Original file line number Diff line number Diff line change
Expand Up @@ -386,8 +386,47 @@ func (w *Wallet) Rescan(ctx context.Context, n NetworkBackend, startHash *chainh
func (w *Wallet) RescanFromHeight(ctx context.Context, n NetworkBackend, startHeight int32) error {
const op errors.Op = "wallet.RescanFromHeight"

bs, err := w.BirthState(ctx)
if err != nil {
return errors.E(op, err)
}
if bs != nil && int32(bs.Height) > startHeight {
// If our birthday is after the rescan height, we may
// not have the cfilters needed. Set the birthday to the rescan
// height and download the filters. This may take some time
// depending on network conditions and the number of filters missing.
bs := &udb.BirthdayState{
SetFromHeight: true,
Height: uint32(startHeight),
}
if err := w.SetBirthStateAndScan(ctx, bs); err != nil {
return errors.E(op, err)
}
fetchMissing := true
if err := walletdb.Update(ctx, w.db, func(dbtx walletdb.ReadWriteTx) error {
if _, err := udb.MissingCFiltersHeight(dbtx, startHeight); err != nil {
// errors.NotExist is returned if we already have all filters
// from start height. If we have them there is no need to
// fetch them again.
if errors.Is(err, errors.NotExist) {
fetchMissing = false
return nil
}
return err
}
return w.txStore.SetMissingMainChainCFilters(dbtx, false)
}); err != nil {
return errors.E(op, err)
}
if fetchMissing {
if err := w.FetchMissingCFilters(ctx, n); err != nil {
return errors.E(op, err)
}
}
}

var startHash chainhash.Hash
err := walletdb.View(ctx, w.db, func(tx walletdb.ReadTx) error {
err = walletdb.View(ctx, w.db, func(tx walletdb.ReadTx) error {
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
var err error
startHash, err = w.txStore.GetMainChainBlockHashForHeight(
Expand Down
93 changes: 60 additions & 33 deletions wallet/udb/txmined.go
Original file line number Diff line number Diff line change
Expand Up @@ -198,8 +198,8 @@ func (s *Store) MainChainTip(dbtx walletdb.ReadTx) (chainhash.Hash, int32) {
// If the block is already inserted and part of the main chain, an errors.Exist
// error is returned.
//
// The main chain tip may not be extended unless compact filters have been saved
// for all existing main chain blocks.
// The main chain may be extended without cfilters if this block is before the
// wallet birthday. If the filter is nil, it will not be saved to the database.
func (s *Store) ExtendMainChain(ns walletdb.ReadWriteBucket, header *wire.BlockHeader, blockHash *chainhash.Hash, f *gcs2.FilterV2) error {
height := int32(header.Height)
if height < 1 {
Expand Down Expand Up @@ -266,9 +266,12 @@ func (s *Store) ExtendMainChain(ns walletdb.ReadWriteBucket, header *wire.BlockH
return err
}

// Save the compact filter.
bcf2Key := blockcf2.Key(&header.MerkleRoot)
return putRawCFilter(ns, blockHash[:], valueRawCFilter2(bcf2Key, f.Bytes()))
// Save the compact filter if we have it.
if f != nil {
bcf2Key := blockcf2.Key(&header.MerkleRoot)
return putRawCFilter(ns, blockHash[:], valueRawCFilter2(bcf2Key, f.Bytes()))
}
return nil
}

// ProcessedTxsBlockMarker returns the hash of the block which records the last
Expand Down Expand Up @@ -331,6 +334,17 @@ type BirthdayState struct {
SetFromHeight, SetFromTime bool
}

// AfterBirthday reports whether the block described by h falls past the
// wallet birthday.  When the birthday was set from a time, the header's
// timestamp must be strictly after the birthday time; when it was set from
// a height, the header's height must be at or after the birthday height.
func (bs *BirthdayState) AfterBirthday(h *wire.BlockHeader) bool {
	if !bs.SetFromTime {
		return h.Height >= bs.Height
	}
	return h.Timestamp.After(bs.Time)
}

// SetBirthState sets the birthday state in the database. *BirthdayState must
// not be nil.
//
Expand Down Expand Up @@ -402,19 +416,37 @@ func (s *Store) IsMissingMainChainCFilters(dbtx walletdb.ReadTx) bool {
return len(v) != 1 || v[0] == 0
}

// SetMissingMainChainCFilters records whether all of the main chain cfilters
// are saved. Pass false for have to mark cfilters as missing, e.g. when the
// wallet birthday is moved back in time.
func (s *Store) SetMissingMainChainCFilters(dbtx walletdb.ReadWriteTx, have bool) error {
Copy link
Member

@jrick jrick Aug 27, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

instead of a boolean value, we could just record the earliest block height that we have saved cfilters from. If that's 0, it's the same as this recording true. If nonzero, it's the same as false but gives us additional information to work with (and return in the function just below this).

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ok, all current wallets are 0 so this will not need a db upgrade. Working on it.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think maybe we still want to set whether we have all filters. If we do a rescan and the wallet is shut off before all of the filters are downloaded, we need to know that when starting up.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sorry, do you mean replace the boolean with a block number? I guess it would need an upgrade in the case of false.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I looked into doing this but unsure if it simplifies the process. If we only save a number, and zero has the meaning that we have them all, then we loose the value of having the number except while downloading. I think the current code handles that well enough though, and it is only when changing the birthday. It may also require a new db version to replace the current boolean if I understand correctly.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The birthday height would have the same meaning as a new db value, I believe.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

true, i think two fields should be used, the height/block of the first cfilter, and the last cfilter

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Am I understanding correctly in that we should replace the bool or add more. I'll tinker some more.

Copy link
Member Author

@JoeGruffins JoeGruffins Sep 1, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The birthday would be where we want to save from, but it could be earlier. So another db value for where the actual first cfilter should be, and where the last one is. The last one will be the head block filter once we are synced, will we update that every block?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It still looks like more code for no gain to me unless I am mistaking something. The problem is that if we set the birthday back a second time there's a new gap, so we either need to save more variables or look through everything similarly to what we are doing now.

haveB := []byte{0}
if have {
haveB = []byte{1}
}
err := dbtx.ReadWriteBucket(wtxmgrBucketKey).Put(rootHaveCFilters, haveB)
if err != nil {
return errors.E(errors.IO, err)
}
return nil
}

// MissingCFiltersHeight returns the first main chain block height at or after
// fromHeight with a missing cfilter. Errors with NotExist when all such main
// chain blocks record cfilters.
func (s *Store) MissingCFiltersHeight(dbtx walletdb.ReadTx) (int32, error) {
func MissingCFiltersHeight(dbtx walletdb.ReadTx, fromHeight int32) (int32, error) {
ns := dbtx.ReadBucket(wtxmgrBucketKey)
c := ns.NestedReadBucket(bucketBlocks).ReadCursor()
defer c.Close()
for k, v := c.First(); k != nil; k, v = c.Next() {
for k, v := c.Seek(keyBlockRecord(fromHeight)); k != nil; k, v = c.Next() {
hash := extractRawBlockRecordHash(v)
_, _, err := fetchRawCFilter2(ns, hash)
if errors.Is(err, errors.NotExist) {
height := int32(byteOrder.Uint32(k))
return height, nil
if err != nil {
if errors.Is(err, errors.NotExist) {
height := int32(byteOrder.Uint32(k))
return height, nil
}
return 0, errors.E(errors.IO, err)
}
}
return 0, errors.E(errors.NotExist)
Expand Down Expand Up @@ -442,42 +474,37 @@ func (s *Store) InsertMissingCFilters(dbtx walletdb.ReadWriteTx, blockHashes []*
}

for i, blockHash := range blockHashes {
// Ensure that blockHashes are ordered and that all previous cfilters in the
// main chain are known.
// Ensure that blockHashes are ordered.
header := existsBlockHeader(ns, blockHash[:])
if header == nil {
return errors.E(errors.NotExist, errors.Errorf("missing header for block %v", blockHash))
}
ok := i == 0 && *blockHash == s.chainParams.GenesisHash
var bcf2Key [gcs2.KeySize]byte
if !ok {
header := existsBlockHeader(ns, blockHash[:])
if header == nil {
return errors.E(errors.NotExist, errors.Errorf("missing header for block %v", blockHash))
}
parentHash := extractBlockHeaderParentHash(header)
merkleRoot := extractBlockHeaderMerkleRoot(header)
merkleRootHash, err := chainhash.NewHash(merkleRoot)
if err != nil {
return errors.E(errors.Invalid, errors.Errorf("invalid stored header %v", blockHash))
}
bcf2Key = blockcf2.Key(merkleRootHash)
if i == 0 {
_, _, err := fetchRawCFilter2(ns, parentHash)
ok = err == nil
} else {
ok = bytes.Equal(parentHash, blockHashes[i-1][:])
if i != 0 {
if !bytes.Equal(parentHash, blockHashes[i-1][:]) {
return errors.E(errors.Invalid, "block hashes are not ordered")
}
}
}
if !ok {
return errors.E(errors.Invalid, "block hashes are not ordered or previous cfilters are missing")
}

// Record cfilter for this block
err := putRawCFilter(ns, blockHash[:], valueRawCFilter2(bcf2Key, filters[i].Bytes()))
merkleRoot := extractBlockHeaderMerkleRoot(header)
merkleRootHash, err := chainhash.NewHash(merkleRoot)
if err != nil {
return errors.E(errors.Invalid, errors.Errorf("invalid stored header %v", blockHash))
}
bcf2Key := blockcf2.Key(merkleRootHash)
err = putRawCFilter(ns, blockHash[:], valueRawCFilter2(bcf2Key, filters[i].Bytes()))
if err != nil {
return err
}
}

// Mark all main chain cfilters as saved if the last block hash is the main
// chain tip.
// chain tip. Even if this is not the head block, all cfilters may be saved
// at this point. The caller may need to check and set rootHaveCFilters.
tip, _ := s.MainChainTip(dbtx)
if bytes.Equal(tip[:], blockHashes[len(blockHashes)-1][:]) {
err := ns.Put(rootHaveCFilters, []byte{1})
Expand Down
Loading
Loading