diff --git a/.travis.yml b/.travis.yml index 0c321a96f9..d1eb3ac49b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,7 +8,7 @@ jobs: - stage: lint os: linux dist: xenial - go: 1.14.x + go: 1.15.x env: - lint git: @@ -16,14 +16,14 @@ jobs: script: - make lint - # Run chipprgeth -specific tests, proving regression-safety and config interoperability. - #- stage: test - # name: "Go1.14.x: make test-chipprgeth" - # os: linux - # dist: xenial - # go: 1.14.x - # script: - # - make test-chipprgeth + # Run core-geth -specific tests, proving regression-safety and config interoperability. + - stage: build + name: "Go1.15.x: make test-chipprgeth" + os: linux + dist: xenial + go: 1.15.x + script: + - make test-chipprgeth # Run build and tests against latest-1 Go version. #- stage: test @@ -37,26 +37,39 @@ jobs: # - make all # - travis_wait 60 make test - # Run build and tests on ARM64 on Pull Requests. - #- stage: test - # name: "ARM64/Go1.14.x: make test" - # if: type = pull_request - # os: linux - # arch: arm64 - # dist: xenial - # go: 1.14.x - # env: - # - GO111MODULE=on - # script: - # - make all - # - travis_wait 60 make test -# - # Run build with environment-aware possible artifact deployment. - - stage: deploy - name: "Go1.14.x: make all && deploy" + # Run build and tests against latest-1 Go version. + - stage: build + name: "Go1.14.x: make test" os: linux dist: xenial go: 1.14.x + env: + - GO111MODULE=on + script: + - make all + - travis_wait 60 make test + + # Run build and tests on ARM64 on Pull Requests. + - stage: build + name: "ARM64/Go1.15.x: make test" + if: type = pull_request + os: linux + arch: arm64 + dist: xenial + go: 1.15.x + env: + - GO111MODULE=on + script: + - make all + - travis_wait 60 make test + + # Run build and tests with environment-aware possible artifact deployment. + - stage: build + if: type = push + name: "Go1.15.x: make test && deploy" + os: linux + dist: xenial + go: 1.15.x script: - make all before_deploy: @@ -75,12 +88,13 @@ jobs: tag_name: "$TRAVIS_TAG" # Run build on ARM5 with environment-aware possible artifact deployment. - - stage: deploy - name: "ARM5/Go1.14.x: go run build/ci.go install && deploy" + - stage: build + if: type = push + name: "ARM5/Go1.15.x: go run build/ci.go install && deploy" os: linux dist: xenial sudo: required - go: 1.14.x + go: 1.15.x env: - ARMv5 git: @@ -110,10 +124,11 @@ jobs: tag_name: "$TRAVIS_TAG" #Run build on OSX with environment-aware possible artifact deployment. 
- - stage: deploy - name: "OSX/Go1.14.x: make all && deploy" + - stage: build + if: type = push + name: "OSX/Go1.15.x: make all && deploy" os: osx - go: 1.14.x + go: 1.15.x script: - echo "Increase the maximum number of open file descriptors on macOS" - NOFILE=20480 diff --git a/Dockerfile b/Dockerfile index 54453c4df5..0705361f5b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build Geth in a stock Go builder container -FROM golang:1.14-alpine as builder +FROM golang:1.15-alpine as builder RUN apk add --no-cache make gcc musl-dev linux-headers git diff --git a/Dockerfile.alltools b/Dockerfile.alltools index 9c28979a1e..e2604232cf 100644 --- a/Dockerfile.alltools +++ b/Dockerfile.alltools @@ -1,5 +1,5 @@ # Build Geth in a stock Go builder container -FROM golang:1.14-alpine as builder +FROM golang:1.15-alpine as builder RUN apk add --no-cache make gcc musl-dev linux-headers git diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index e2d4d49705..89a72234d7 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -678,14 +678,16 @@ func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *type } // AdjustTime adds a time shift to the simulated clock. +// It can only be called on empty blocks. func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error { b.mu.Lock() defer b.mu.Unlock() + if len(b.pendingBlock.Transactions()) != 0 { + return errors.New("Could not adjust time on non-empty block") + } + blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { - for _, tx := range b.pendingBlock.Transactions() { - block.AddTx(tx) - } block.OffsetTime(int64(adjustment.Seconds())) }) statedb, _ := b.blockchain.State() diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index 73f5ebb169..a7cb7e44ce 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -144,8 +144,7 @@ func TestSimulatedBackend_AdjustTime(t *testing.T) { defer sim.Close() prevTime := sim.pendingBlock.Time() - err := sim.AdjustTime(time.Second) - if err != nil { + if err := sim.AdjustTime(time.Second); err != nil { t.Error(err) } newTime := sim.pendingBlock.Time() @@ -155,6 +154,44 @@ func TestSimulatedBackend_AdjustTime(t *testing.T) { } } +func TestNewSimulatedBackend_AdjustTimeFail(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + // Create tx and send + tx := types.NewTransaction(0, testAddr, big.NewInt(1000), vars.TxGas, big.NewInt(1), nil) + signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + sim.SendTransaction(context.Background(), signedTx) + // AdjustTime should fail on non-empty block + if err := sim.AdjustTime(time.Second); err == nil { + t.Error("Expected adjust time to error on non-empty block") + } + sim.Commit() + + prevTime := sim.pendingBlock.Time() + if err := sim.AdjustTime(time.Minute); err != nil { + t.Error(err) + } + newTime := sim.pendingBlock.Time() + if newTime-prevTime != uint64(time.Minute.Seconds()) { + t.Errorf("adjusted time not equal to a minute. 
prev: %v, new: %v", prevTime, newTime) + } + // Put a transaction after adjusting time + tx2 := types.NewTransaction(1, testAddr, big.NewInt(1000), vars.TxGas, big.NewInt(1), nil) + signedTx2, err := types.SignTx(tx2, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + sim.SendTransaction(context.Background(), signedTx2) + sim.Commit() + newTime = sim.pendingBlock.Time() + if newTime-prevTime >= uint64(time.Minute.Seconds()) { + t.Errorf("time adjusted, but shouldn't be: prev: %v, new: %v", prevTime, newTime) + } +} + func TestSimulatedBackend_BalanceAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) expectedBal := big.NewInt(10000000000) diff --git a/appveyor.yml b/appveyor.yml index a47be111ca..5b94f27f3b 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -19,8 +19,8 @@ environment: install: - git submodule update --init - rmdir C:\go /s /q - - appveyor DownloadFile https://dl.google.com/go/go1.14.2.windows-%GETH_ARCH%.zip - - 7z x go1.14.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL + - appveyor DownloadFile https://dl.google.com/go/go1.15.windows-%GETH_ARCH%.zip + - 7z x go1.15.windows-%GETH_ARCH%.zip -y -oC:\ > NUL - go version - gcc --version diff --git a/build/checksums.txt b/build/checksums.txt index c4b276e349..39f855cd0c 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -1,6 +1,6 @@ # This file contains sha256 checksums of optional build dependencies. -98de84e69726a66da7b4e58eac41b99cbe274d7e8906eeb8a5b7eb0aadee7f7c go1.14.2.src.tar.gz +69438f7ed4f532154ffaf878f3dfd83747e7a00b70b3556eddabf7aaee28ac3a go1.15.src.tar.gz d998a84eea42f2271aca792a7b027ca5c1edfcba229e8e5a844c9ac3f336df35 golangci-lint-1.27.0-linux-armv7.tar.gz bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4 golangci-lint.exe-1.27.0-windows-amd64.zip diff --git a/build/ci.go b/build/ci.go index c298465e6d..10ad3839e5 100644 --- a/build/ci.go +++ b/build/ci.go @@ -149,13 +149,14 @@ var ( // Note: zesty is unsupported because it was officially deprecated on Launchpad. // Note: artful is unsupported because it was officially deprecated on Launchpad. // Note: cosmic is unsupported because it was officially deprecated on Launchpad. + // Note: disco is unsupported because it was officially deprecated on Launchpad. 
debDistroGoBoots = map[string]string{ "trusty": "golang-1.11", "xenial": "golang-go", "bionic": "golang-go", - "disco": "golang-go", "eoan": "golang-go", "focal": "golang-go", + "groovy": "golang-go", } debGoBootPaths = map[string]string{ diff --git a/cmd/ancient-store-mem/lib/mem.go b/cmd/ancient-store-mem/lib/mem.go index 0b8f99f30d..5d788afac2 100644 --- a/cmd/ancient-store-mem/lib/mem.go +++ b/cmd/ancient-store-mem/lib/mem.go @@ -60,7 +60,7 @@ func (f *MemFreezerRemoteServerAPI) Reset() { } func (f *MemFreezerRemoteServerAPI) HasAncient(kind string, number uint64) (bool, error) { - fmt.Println("mock server called", "method=HasAncient") + // fmt.Println("mock server called", "method=HasAncient") f.mu.Lock() defer f.mu.Unlock() _, ok := f.store[f.storeKey(kind, number)] @@ -68,7 +68,7 @@ func (f *MemFreezerRemoteServerAPI) HasAncient(kind string, number uint64) (bool } func (f *MemFreezerRemoteServerAPI) Ancient(kind string, number uint64) ([]byte, error) { - fmt.Println("mock server called", "method=Ancient") + // fmt.Println("mock server called", "method=Ancient") f.mu.Lock() defer f.mu.Unlock() v, ok := f.store[f.storeKey(kind, number)] @@ -79,12 +79,12 @@ func (f *MemFreezerRemoteServerAPI) Ancient(kind string, number uint64) ([]byte, } func (f *MemFreezerRemoteServerAPI) Ancients() (uint64, error) { - fmt.Println("mock server called", "method=Ancients") + // fmt.Println("mock server called", "method=Ancients") return f.count, nil } func (f *MemFreezerRemoteServerAPI) AncientSize(kind string) (uint64, error) { - fmt.Println("mock server called", "method=AncientSize") + // fmt.Println("mock server called", "method=AncientSize") sum := uint64(0) for k, v := range f.store { if strings.HasPrefix(k, kind) { @@ -95,7 +95,7 @@ func (f *MemFreezerRemoteServerAPI) AncientSize(kind string) (uint64, error) { } func (f *MemFreezerRemoteServerAPI) AppendAncient(number uint64, hash, header, body, receipt, td []byte) error { - fmt.Println("mock server called", "method=AppendAncient", "number=", number, "header", fmt.Sprintf("%x", header)) + // fmt.Println("mock server called", "method=AppendAncient", "number=", number, "header", fmt.Sprintf("%x", header)) fieldNames := []string{ freezerRemoteHashTable, freezerRemoteHeaderTable, @@ -118,7 +118,7 @@ func (f *MemFreezerRemoteServerAPI) AppendAncient(number uint64, hash, header, b } func (f *MemFreezerRemoteServerAPI) TruncateAncients(n uint64) error { - fmt.Println("mock server called", "method=TruncateAncients") + // fmt.Println("mock server called", "method=TruncateAncients") f.count = n f.mu.Lock() defer f.mu.Unlock() @@ -136,11 +136,11 @@ func (f *MemFreezerRemoteServerAPI) TruncateAncients(n uint64) error { } func (f *MemFreezerRemoteServerAPI) Sync() error { - fmt.Println("mock server called", "method=Sync") + // fmt.Println("mock server called", "method=Sync") return nil } func (f *MemFreezerRemoteServerAPI) Close() error { - fmt.Println("mock server called", "method=Close") + // fmt.Println("mock server called", "method=Close") return nil } diff --git a/cmd/evm/README.md b/cmd/evm/README.md index 418417475d..8f0848bde8 100644 --- a/cmd/evm/README.md +++ b/cmd/evm/README.md @@ -29,6 +29,8 @@ Command line params that has to be supported are --trace Output full trace logs to files .jsonl --trace.nomemory Disable full memory dump in traces --trace.nostack Disable stack output in traces + --trace.noreturndata Disable return data output in traces + --output.basedir value Specifies where output files are placed. Will be created if it does not exist. 
(default: ".") --output.alloc alloc Determines where to put the alloc of the post-state. `stdout` - into the stdout output `stderr` - into the stderr output @@ -232,13 +234,13 @@ Example where blockhashes are provided: ./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace ``` ``` -cat trace-0.jsonl | grep BLOCKHASH -C2 +cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2 ``` ``` -{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"depth":1,"refund":0,"opName":"PUSH1","error":""} -{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"depth":1,"refund":0,"opName":"BLOCKHASH","error":""} -{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"depth":1,"refund":0,"opName":"STOP","error":""} -{"output":"","gasUsed":"0x17","time":155861} +{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"PUSH1","error":""} +{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"BLOCKHASH","error":""} +{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"STOP","error":""} +{"output":"","gasUsed":"0x17","time":112885} ``` In this example, the caller has not provided the required blockhash: @@ -254,9 +256,9 @@ Error code: 4 Another thing that can be done, is to chain invocations: ``` ./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout | ./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json -INFO [06-29|11:52:04.934] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" -INFO [06-29|11:52:04.936] rejected tx index=0 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" -INFO [06-29|11:52:04.936] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" +INFO [08-03|15:25:15.168] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" +INFO [08-03|15:25:15.169] rejected tx index=0 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" +INFO [08-03|15:25:15.169] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" ``` What happened here, is that we first applied two identical transactions, so the second one was rejected. 
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index c8f373ef1f..eb02a022e4 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/params/types/ctypes" "github.com/ethereum/go-ethereum/params/types/genesisT" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" "golang.org/x/crypto/sha3" ) @@ -82,7 +83,7 @@ type stEnvMarshaling struct { // Apply applies a set of transactions to a pre-state func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig ctypes.ChainConfigurator, txs types.Transactions, miningReward int64, - getTracerFn func(txIndex int) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) { + getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) { // Capture errors for BLOCKHASH operation, if we haven't been supplied the // required blockhashes @@ -137,7 +138,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig ctypes.ChainConfigura rejectedTxs = append(rejectedTxs, i) continue } - tracer, err := getTracerFn(txIndex) + tracer, err := getTracerFn(txIndex, tx.Hash()) if err != nil { return nil, nil, err } @@ -223,8 +224,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig ctypes.ChainConfigura } execRs := &ExecutionResult{ StateRoot: root, - TxRoot: types.DeriveSha(includedTxs), - ReceiptRoot: types.DeriveSha(receipts), + TxRoot: types.DeriveSha(includedTxs, new(trie.Trie)), + ReceiptRoot: types.DeriveSha(receipts, new(trie.Trie)), Bloom: types.CreateBloom(receipts), LogsHash: rlpHash(statedb.Logs()), Receipts: receipts, diff --git a/cmd/evm/internal/t8ntool/flags.go b/cmd/evm/internal/t8ntool/flags.go index d110af2c30..424156ba82 100644 --- a/cmd/evm/internal/t8ntool/flags.go +++ b/cmd/evm/internal/t8ntool/flags.go @@ -42,6 +42,11 @@ var ( Name: "trace.noreturndata", Usage: "Disable return data output in traces", } + OutputBasedir = cli.StringFlag{ + Name: "output.basedir", + Usage: "Specifies where output files are placed. 
Will be created if it does not exist.", + Value: "", + } OutputAllocFlag = cli.StringFlag{ Name: "output.alloc", Usage: "Determines where to put the `alloc` of the post-state.\n" + diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 969c8830c1..419e117ec3 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -22,6 +22,7 @@ import ( "io/ioutil" "math/big" "os" + "path" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" @@ -75,11 +76,22 @@ func Main(ctx *cli.Context) error { log.Root().SetHandler(glogger) var ( - err error - tracer vm.Tracer + err error + tracer vm.Tracer + baseDir = "" ) - var getTracer func(txIndex int) (vm.Tracer, error) + var getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error) + // If user specified a basedir, make sure it exists + if ctx.IsSet(OutputBasedir.Name) { + if base := ctx.String(OutputBasedir.Name); len(base) > 0 { + err := os.MkdirAll(base, 0755) // rwxr-xr-x + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err)) + } + baseDir = base + } + } if ctx.Bool(TraceFlag.Name) { // Configure the EVM logger logConfig := &vm.LogConfig{ @@ -95,11 +107,11 @@ func Main(ctx *cli.Context) error { prevFile.Close() } }() - getTracer = func(txIndex int) (vm.Tracer, error) { + getTracer = func(txIndex int, txHash common.Hash) (vm.Tracer, error) { if prevFile != nil { prevFile.Close() } - traceFile, err := os.Create(fmt.Sprintf("trace-%d.jsonl", txIndex)) + traceFile, err := os.Create(path.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String()))) if err != nil { return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err)) } @@ -107,7 +119,7 @@ func Main(ctx *cli.Context) error { return vm.NewJSONLogger(logConfig, traceFile), nil } } else { - getTracer = func(txIndex int) (tracer vm.Tracer, err error) { + getTracer = func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error) { return nil, nil } } @@ -199,7 +211,7 @@ func Main(ctx *cli.Context) error { //postAlloc := state.DumpGenesisFormat(false, false, false) collector := make(Alloc) state.DumpToCollector(collector, false, false, false, nil, -1) - return dispatchOutput(ctx, baseDir, result, collector) } @@ -226,12 +238,12 @@ func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) { } // saveFile marshalls the object to the given file -func saveFile(filename string, data interface{}) error { +func saveFile(baseDir, filename string, data interface{}) error { b, err := json.MarshalIndent(data, "", " ") if err != nil { return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) } - if err = ioutil.WriteFile(filename, b, 0644); err != nil { + if err = ioutil.WriteFile(path.Join(baseDir, filename), b, 0644); err != nil { return NewError(ErrorIO, fmt.Errorf("failed writing output: %v", err)) } return nil @@ -239,26 +251,26 @@ func saveFile(filename string, data interface{}) error { // dispatchOutput writes the output data to either stderr or stdout, or to the specified // files -func dispatchOutput(ctx *cli.Context, result *ExecutionResult, alloc Alloc) error { +func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc) error { stdOutObject := make(map[string]interface{}) stdErrObject := make(map[string]interface{}) - dispatch := func(fName, name string, obj interface{}) error { +
dispatch := func(baseDir, fName, name string, obj interface{}) error { switch fName { case "stdout": stdOutObject[name] = obj case "stderr": stdErrObject[name] = obj default: // save to file - if err := saveFile(fName, obj); err != nil { + if err := saveFile(baseDir, fName, obj); err != nil { return err } } return nil } - if err := dispatch(ctx.String(OutputAllocFlag.Name), "alloc", alloc); err != nil { + if err := dispatch(baseDir, ctx.String(OutputAllocFlag.Name), "alloc", alloc); err != nil { return err } - if err := dispatch(ctx.String(OutputResultFlag.Name), "result", result); err != nil { + if err := dispatch(baseDir, ctx.String(OutputResultFlag.Name), "result", result); err != nil { return err } if len(stdOutObject) > 0 { diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 7b472350d9..35c672142d 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -146,6 +146,7 @@ var stateTransitionCommand = cli.Command{ t8ntool.TraceDisableMemoryFlag, t8ntool.TraceDisableStackFlag, t8ntool.TraceDisableReturnDataFlag, + t8ntool.OutputBasedir, t8ntool.OutputAllocFlag, t8ntool.OutputResultFlag, t8ntool.InputAllocFlag, diff --git a/cmd/evm/transition-test.sh b/cmd/evm/transition-test.sh index d1400ca577..34c9249855 100644 --- a/cmd/evm/transition-test.sh +++ b/cmd/evm/transition-test.sh @@ -155,10 +155,10 @@ echo "Example where blockhashes are provided: " cmd="./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace" tick && echo $cmd && tick $cmd 2>&1 >/dev/null -cmd="cat trace-0.jsonl | grep BLOCKHASH -C2" +cmd="cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2" tick && echo $cmd && tick echo "$ticks" -cat trace-0.jsonl | grep BLOCKHASH -C2 +cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2 echo "$ticks" echo "" diff --git a/cmd/geth/les_test.go b/cmd/geth/les_test.go index 1bf52d591a..ae9ed5ecf5 100644 --- a/cmd/geth/les_test.go +++ b/cmd/geth/les_test.go @@ -95,9 +95,9 @@ func (g *gethrpc) waitSynced() { } } -func startGethWithRpc(t *testing.T, name string, args ...string) *gethrpc { +func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc { g := &gethrpc{name: name} - args = append([]string{"--networkid=42", "--port=0", "--nousb", "--http", "--http.port=0", "--http.api=admin,eth,les"}, args...) + args = append([]string{"--networkid=42", "--port=0", "--nousb"}, args...) t.Logf("Starting %v with rpc: %v", name, args) g.geth = runGeth(t, args...) // wait before we can attach to it. 
TODO: probe for it properly @@ -112,7 +112,7 @@ func startGethWithRpc(t *testing.T, name string, args ...string) *gethrpc { } func initGeth(t *testing.T) string { - g := runGeth(t, "--networkid=42", "init", "./testdata/clique.json") + g := runGeth(t, "--nousb", "--networkid=42", "init", "./testdata/clique.json") datadir := g.Datadir g.WaitExit() return datadir @@ -120,15 +120,15 @@ func initGeth(t *testing.T) string { func startLightServer(t *testing.T) *gethrpc { datadir := initGeth(t) - runGeth(t, "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv").WaitExit() + runGeth(t, "--nousb", "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv").WaitExit() account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105" - server := startGethWithRpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1") + server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1") return server } func startClient(t *testing.T, name string) *gethrpc { datadir := initGeth(t) - return startGethWithRpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1") + return startGethWithIpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1") } func TestPriorityClient(t *testing.T) { @@ -166,6 +166,7 @@ func TestPriorityClient(t *testing.T) { freeCli.getNodeInfo().ID: freeCli, prioCli.getNodeInfo().ID: prioCli, } + time.Sleep(1 * time.Second) lightServer.callRPC(&peers, "admin_peers") peersWithNames := make(map[string]string) for _, p := range peers { diff --git a/cmd/geth/retesteth.go b/cmd/geth/retesteth.go index ac3ff3c0bc..8debbcd23c 100644 --- a/cmd/geth/retesteth.go +++ b/cmd/geth/retesteth.go @@ -252,7 +252,7 @@ func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header.Root = statedb.IntermediateRoot(chain.Config().IsEnabled(chain.Config().GetEIP161dTransition, header.Number)) // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, receipts), nil + return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), nil } } diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 2f468d4781..05e2886da9 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -40,6 +40,7 @@ import ( "github.com/ethereum/go-ethereum/params/vars" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" lru "github.com/hashicorp/golang-lru" "golang.org/x/crypto/sha3" ) @@ -566,7 +567,7 @@ func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header * header.UncleHash = types.CalcUncleHash(nil) // Assemble and return the final block for sealing - return types.NewBlock(header, txs, nil, receipts), nil + return types.NewBlock(header, txs, nil, receipts, new(trie.Trie)), nil } // Authorize injects a private key into the consensus engine to mint new blocks diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go index d6c871092e..4d57e2f79b 100644 --- a/consensus/ethash/algorithm.go +++ b/consensus/ethash/algorithm.go 
@@ -39,7 +39,8 @@ const ( datasetGrowthBytes = 1 << 23 // Dataset growth per epoch cacheInitBytes = 1 << 24 // Bytes in cache at genesis cacheGrowthBytes = 1 << 17 // Cache growth per epoch - epochLength = 30000 // Blocks per epoch + oldEpochLength = 30000 // Blocks per epoch pre ECIP-1099 activation + newEpochLength = 60000 // Blocks per epoch post ECIP-1099 activation mixBytes = 128 // Width of mix hashBytes = 64 // Hash length in bytes hashWords = 16 // Number of 32 bit ints in a hash @@ -48,14 +49,35 @@ const ( loopAccesses = 64 // Number of accesses in hashimoto loop ) +// Activation block for ECIP-1099 (etchash) +// mainnet: 11460000 +// mordor: 2340000 +// TODO: Move to chain configs - iquidus +const ecip1099Block = uint64(11460000) + +// calcEpochLength returns the epoch length for a given block number (ECIP-1099) +func calcEpochLength(block uint64) uint64 { + if block < ecip1099Block { + return oldEpochLength + } + return newEpochLength +} + +// calcEpoch returns the epoch for a given block number (ECIP-1099) +func calcEpoch(block uint64) (uint64, uint64) { + epochLength := calcEpochLength(block) + epoch := block / epochLength + return epoch, epochLength +} + // cacheSize returns the size of the ethash verification cache that belongs to a certain // block number. func cacheSize(block uint64) uint64 { - epoch := int(block / epochLength) + epoch, _ := calcEpoch(block) if epoch < maxEpoch { - return cacheSizes[epoch] + return cacheSizes[int(epoch)] } - return calcCacheSize(epoch) + return calcCacheSize(int(epoch)) } // calcCacheSize calculates the cache size for epoch. The cache size grows linearly, @@ -72,11 +94,11 @@ func calcCacheSize(epoch int) uint64 { // datasetSize returns the size of the ethash mining dataset that belongs to a certain // block number. func datasetSize(block uint64) uint64 { - epoch := int(block / epochLength) + epoch, _ := calcEpoch(block) if epoch < maxEpoch { - return datasetSizes[epoch] + return datasetSizes[int(epoch)] } - return calcDatasetSize(epoch) + return calcDatasetSize(int(epoch)) } // calcDatasetSize calculates the dataset size for epoch. The dataset size grows linearly, @@ -120,11 +142,11 @@ func makeHasher(h hash.Hash) hasher { // dataset. func seedHash(block uint64) []byte { seed := make([]byte, 32) - if block < epochLength { + if block < oldEpochLength { return seed } keccak256 := makeHasher(sha3.NewLegacyKeccak256()) - for i := 0; i < int(block/epochLength); i++ { + for i := 0; i < int(block/oldEpochLength); i++ { keccak256(seed, seed) } return seed @@ -136,7 +158,7 @@ func seedHash(block uint64) []byte { // algorithm from Strict Memory Hard Hashing Functions (2014). The output is a // set of 524288 64-byte values. // This method places the result into dest in machine byte order. 
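Before the generator changes below, a worked example of the ECIP-1099 epoch math defined above. This is a standalone sketch, not part of the patch; the constants are copied from this hunk:

package main

import "fmt"

const (
	oldEpochLength = uint64(30000)
	newEpochLength = uint64(60000)
	ecip1099Block  = uint64(11460000)
)

// calcEpoch mirrors the function added above: the epoch length doubles at
// the ECIP-1099 activation block, which halves the epoch number.
func calcEpoch(block uint64) (uint64, uint64) {
	epochLength := oldEpochLength
	if block >= ecip1099Block {
		epochLength = newEpochLength
	}
	return block / epochLength, epochLength
}

func main() {
	fmt.Println(calcEpoch(11459999)) // 381 30000: legacy geometry just before activation
	fmt.Println(calcEpoch(11460000)) // 191 60000: doubled epochs from activation onward
}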
-func generateCache(dest []uint32, epoch uint64, seed []byte) { +func generateCache(dest []uint32, epoch uint64, epochLength uint64, seed []byte) { // Print some debug logs to allow analysis on low end devices logger := log.New("epoch", epoch) @@ -148,7 +170,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) { if elapsed > 3*time.Second { logFn = logger.Info } - logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed)) + logFn("Generated ethash verification cache", "epochLength", epochLength, "elapsed", common.PrettyDuration(elapsed)) }() // Convert our destination slice to a byte buffer header := *(*reflect.SliceHeader)(unsafe.Pointer(&dest)) @@ -172,7 +194,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) { case <-done: return case <-time.After(3 * time.Second): - logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", common.PrettyDuration(time.Since(start))) + logger.Info("Generating ethash verification cache", "epochLength", epochLength, "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", common.PrettyDuration(time.Since(start))) } } }() @@ -264,7 +286,7 @@ func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte // generateDataset generates the entire ethash dataset for mining. // This method places the result into dest in machine byte order. -func generateDataset(dest []uint32, epoch uint64, cache []uint32) { +func generateDataset(dest []uint32, epoch uint64, epochLength uint64, cache []uint32) { // Print some debug logs to allow analysis on low end devices logger := log.New("epoch", epoch) @@ -276,7 +298,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) { if elapsed > 3*time.Second { logFn = logger.Info } - logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed)) + logFn("Generated ethash verification cache", "epochLength", epochLength, "elapsed", common.PrettyDuration(elapsed)) }() // Figure out whether the bytes need to be swapped for the machine @@ -320,7 +342,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) { copy(dataset[index*hashBytes:], item) if status := atomic.AddUint32(&progress, 1); status%percent == 0 { - logger.Info("Generating DAG in progress", "percentage", uint64(status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start))) + logger.Info("Generating DAG in progress", "epochLength", epochLength, "percentage", uint64(status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start))) } } }(i) diff --git a/consensus/ethash/algorithm_test.go b/consensus/ethash/algorithm_test.go index 51fb6b124d..ef60579230 100644 --- a/consensus/ethash/algorithm_test.go +++ b/consensus/ethash/algorithm_test.go @@ -108,7 +108,7 @@ func TestCacheGeneration(t *testing.T) { } for i, tt := range tests { cache := make([]uint32, tt.size/4) - generateCache(cache, tt.epoch, seedHash(tt.epoch*epochLength+1)) + generateCache(cache, tt.epoch, oldEpochLength, seedHash(tt.epoch*oldEpochLength+1)) want := make([]uint32, tt.size/4) prepare(want, tt.cache) @@ -648,10 +648,10 @@ func TestDatasetGeneration(t *testing.T) { } for i, tt := range tests { cache := make([]uint32, tt.cacheSize/4) - generateCache(cache, tt.epoch, seedHash(tt.epoch*epochLength+1)) + generateCache(cache, tt.epoch, oldEpochLength, seedHash(tt.epoch*oldEpochLength+1)) dataset := make([]uint32, tt.datasetSize/4) - generateDataset(dataset, tt.epoch, 
cache) + generateDataset(dataset, tt.epoch, oldEpochLength, cache) want := make([]uint32, tt.datasetSize/4) prepare(want, tt.dataset) @@ -667,10 +667,10 @@ func TestHashimoto(t *testing.T) { // Create the verification cache and mining dataset cache := make([]uint32, 1024/4) - generateCache(cache, 0, make([]byte, 32)) + generateCache(cache, 0, oldEpochLength, make([]byte, 32)) dataset := make([]uint32, 32*1024/4) - generateDataset(dataset, 0, cache) + generateDataset(dataset, 0, oldEpochLength, cache) // Create a block to verify hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f") @@ -743,26 +743,26 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) { func BenchmarkCacheGeneration(b *testing.B) { for i := 0; i < b.N; i++ { cache := make([]uint32, cacheSize(1)/4) - generateCache(cache, 0, make([]byte, 32)) + generateCache(cache, 0, oldEpochLength, make([]byte, 32)) } } // Benchmarks the dataset (small) generation performance. func BenchmarkSmallDatasetGeneration(b *testing.B) { cache := make([]uint32, 65536/4) - generateCache(cache, 0, make([]byte, 32)) + generateCache(cache, 0, oldEpochLength, make([]byte, 32)) b.ResetTimer() for i := 0; i < b.N; i++ { dataset := make([]uint32, 32*65536/4) - generateDataset(dataset, 0, cache) + generateDataset(dataset, 0, oldEpochLength, cache) } } // Benchmarks the light verification performance. func BenchmarkHashimotoLight(b *testing.B) { cache := make([]uint32, cacheSize(1)/4) - generateCache(cache, 0, make([]byte, 32)) + generateCache(cache, 0, oldEpochLength, make([]byte, 32)) hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f") @@ -775,10 +775,10 @@ // Benchmarks the full (small) verification performance. func BenchmarkHashimotoFullSmall(b *testing.B) { cache := make([]uint32, 65536/4) - generateCache(cache, 0, make([]byte, 32)) + generateCache(cache, 0, oldEpochLength, make([]byte, 32)) dataset := make([]uint32, 32*65536/4) - generateDataset(dataset, 0, cache) + generateDataset(dataset, 0, oldEpochLength, cache) hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f") diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 773ff4a436..4787b58a27 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/params/types/ctypes" "github.com/ethereum/go-ethereum/params/vars" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" "golang.org/x/crypto/sha3" ) @@ -550,7 +551,7 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea header.Root = state.IntermediateRoot(chain.Config().IsEnabled(chain.Config().GetEIP161dTransition, header.Number)) // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, receipts), nil + return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), nil } // SealHash returns the hash of a block prior to it being sealed. diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index aa3f002c0d..aed3c0da74 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -155,7 +155,7 @@ func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(bu // lru tracks caches or datasets by their last use time, keeping at most N of them.
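The lru type that follows threads the epoch length through its constructor callback, so cache and dataset items know their own geometry. A minimal self-contained sketch of the widened callback shape, with the types reduced to essentials for illustration:

package main

import "fmt"

// cacheItem stands in for the real cache/dataset structs below: items now
// carry both their epoch and that epoch's length (ECIP-1099).
type cacheItem struct {
	epoch       uint64
	epochLength uint64
}

func main() {
	// The constructor the LRU calls now receives both values.
	newItem := func(epoch, epochLength uint64) interface{} {
		return &cacheItem{epoch: epoch, epochLength: epochLength}
	}
	fmt.Printf("%+v\n", newItem(191, 60000)) // &{epoch:191 epochLength:60000}
}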
type lru struct { what string - new func(epoch uint64) interface{} + new func(epoch uint64, epochLength uint64) interface{} mu sync.Mutex // Items are kept in a LRU cache, but there is a special case: // We always keep an item for (highest seen epoch) + 1 as the 'future item'. @@ -166,7 +166,7 @@ type lru struct { // newlru create a new least-recently-used cache for either the verification caches // or the mining datasets. -func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru { +func newlru(what string, maxItems int, new func(epoch uint64, epochLength uint64) interface{}) *lru { if maxItems <= 0 { maxItems = 1 } @@ -179,7 +179,7 @@ func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru // get retrieves or creates an item for the given epoch. The first return value is always // non-nil. The second return value is non-nil if lru thinks that an item will be useful in // the near future. -func (lru *lru) get(epoch uint64) (item, future interface{}) { +func (lru *lru) get(epoch uint64, epochLength uint64) (item, future interface{}) { lru.mu.Lock() defer lru.mu.Unlock() @@ -190,14 +190,14 @@ func (lru *lru) get(epoch uint64) (item, future interface{}) { item = lru.futureItem } else { log.Trace("Requiring new ethash "+lru.what, "epoch", epoch) - item = lru.new(epoch) + item = lru.new(epoch, epochLength) } lru.cache.Add(epoch, item) } // Update the 'future item' if epoch is larger than previously seen. if epoch < maxEpoch-1 && lru.future < epoch+1 { log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1) - future = lru.new(epoch + 1) + future = lru.new(epoch+1, epochLength) lru.future = epoch + 1 lru.futureItem = future } @@ -206,31 +206,32 @@ func (lru *lru) get(epoch uint64) (item, future interface{}) { // cache wraps an ethash cache with some metadata to allow easier concurrent use. type cache struct { - epoch uint64 // Epoch for which this cache is relevant - dump *os.File // File descriptor of the memory mapped cache - mmap mmap.MMap // Memory map itself to unmap before releasing - cache []uint32 // The actual cache data content (may be memory mapped) - once sync.Once // Ensures the cache is generated only once + epoch uint64 // Epoch for which this cache is relevant + epochLength uint64 // Epoch length (ECIP-1099) + dump *os.File // File descriptor of the memory mapped cache + mmap mmap.MMap // Memory map itself to unmap before releasing + cache []uint32 // The actual cache data content (may be memory mapped) + once sync.Once // Ensures the cache is generated only once } // newCache creates a new ethash verification cache and returns it as a plain Go // interface to be usable in an LRU cache. -func newCache(epoch uint64) interface{} { - return &cache{epoch: epoch} +func newCache(epoch uint64, epochLength uint64) interface{} { + return &cache{epoch: epoch, epochLength: epochLength} } // generate ensures that the cache content is generated before use. func (c *cache) generate(dir string, limit int, lock bool, test bool) { c.once.Do(func() { - size := cacheSize(c.epoch*epochLength + 1) - seed := seedHash(c.epoch*epochLength + 1) + size := cacheSize(c.epoch*c.epochLength + 1) + seed := seedHash(c.epoch*c.epochLength + 1) if test { size = 1024 } // If we don't store anything on disk, generate and return. 
if dir == "" { c.cache = make([]uint32, size/4) - generateCache(c.cache, c.epoch, seed) + generateCache(c.cache, c.epoch, c.epochLength, seed) return } // Disk storage is needed, this will get fancy @@ -255,16 +256,16 @@ func (c *cache) generate(dir string, limit int, lock bool, test bool) { logger.Debug("Failed to load old ethash cache", "err", err) // No previous cache available, create a new cache file to fill - c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) }) + c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, c.epochLength, seed) }) if err != nil { logger.Error("Failed to generate mapped ethash cache", "err", err) c.cache = make([]uint32, size/4) - generateCache(c.cache, c.epoch, seed) + generateCache(c.cache, c.epoch, c.epochLength, seed) } // Iterate over all previous instances and delete old ones for ep := int(c.epoch) - limit; ep >= 0; ep-- { - seed := seedHash(uint64(ep)*epochLength + 1) + seed := seedHash(uint64(ep)*c.epochLength + 1) path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian)) os.Remove(path) } @@ -282,18 +283,19 @@ func (c *cache) finalizer() { // dataset wraps an ethash dataset with some metadata to allow easier concurrent use. type dataset struct { - epoch uint64 // Epoch for which this cache is relevant - dump *os.File // File descriptor of the memory mapped cache - mmap mmap.MMap // Memory map itself to unmap before releasing - dataset []uint32 // The actual cache data content - once sync.Once // Ensures the cache is generated only once - done uint32 // Atomic flag to determine generation status + epoch uint64 // Epoch for which this cache is relevant + epochLength uint64 // Epoch length (ECIP-1099) + dump *os.File // File descriptor of the memory mapped cache + mmap mmap.MMap // Memory map itself to unmap before releasing + dataset []uint32 // The actual cache data content + once sync.Once // Ensures the cache is generated only once + done uint32 // Atomic flag to determine generation status } // newDataset creates a new ethash mining dataset and returns it as a plain Go // interface to be usable in an LRU cache. -func newDataset(epoch uint64) interface{} { - return &dataset{epoch: epoch} +func newDataset(epoch uint64, epochLength uint64) interface{} { + return &dataset{epoch: epoch, epochLength: epochLength} } // generate ensures that the dataset content is generated before use. @@ -302,9 +304,9 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) { // Mark the dataset generated after we're done. 
This is needed for remote defer atomic.StoreUint32(&d.done, 1) - csize := cacheSize(d.epoch*epochLength + 1) - dsize := datasetSize(d.epoch*epochLength + 1) - seed := seedHash(d.epoch*epochLength + 1) + csize := cacheSize(d.epoch*d.epochLength + 1) + dsize := datasetSize(d.epoch*d.epochLength + 1) + seed := seedHash(d.epoch*d.epochLength + 1) if test { csize = 1024 dsize = 32 * 1024 @@ -312,10 +314,10 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) { // If we don't store anything on disk, generate and return if dir == "" { cache := make([]uint32, csize/4) - generateCache(cache, d.epoch, seed) + generateCache(cache, d.epoch, d.epochLength, seed) d.dataset = make([]uint32, dsize/4) - generateDataset(d.dataset, d.epoch, cache) + generateDataset(d.dataset, d.epoch, d.epochLength, cache) return } @@ -342,18 +344,18 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) { // No previous dataset available, create a new dataset file to fill cache := make([]uint32, csize/4) - generateCache(cache, d.epoch, seed) + generateCache(cache, d.epoch, d.epochLength, seed) - d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) }) + d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, d.epochLength, cache) }) if err != nil { logger.Error("Failed to generate mapped ethash dataset", "err", err) d.dataset = make([]uint32, dsize/2) - generateDataset(d.dataset, d.epoch, cache) + generateDataset(d.dataset, d.epoch, d.epochLength, cache) } // Iterate over all previous instances and delete old ones for ep := int(d.epoch) - limit; ep >= 0; ep-- { - seed := seedHash(uint64(ep)*epochLength + 1) + seed := seedHash(uint64(ep)*d.epochLength + 1) path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian)) os.Remove(path) } @@ -378,13 +380,15 @@ func (d *dataset) finalizer() { // MakeCache generates a new ethash cache and optionally stores it to disk. func MakeCache(block uint64, dir string) { - c := cache{epoch: block / epochLength} + epoch, epochLength := calcEpoch(block) + c := cache{epoch: epoch, epochLength: epochLength} c.generate(dir, math.MaxInt32, false, false) } // MakeDataset generates a new ethash dataset and optionally stores it to disk. func MakeDataset(block uint64, dir string) { - d := dataset{epoch: block / epochLength} + epoch, epochLength := calcEpoch(block) + d := dataset{epoch: epoch, epochLength: epochLength} d.generate(dir, math.MaxInt32, false, false) } @@ -553,8 +557,8 @@ func (ethash *Ethash) Close() error { // by first checking against a list of in-memory caches, then against caches // stored on disk, and finally generating one if none can be found. func (ethash *Ethash) cache(block uint64) *cache { - epoch := block / epochLength - currentI, futureI := ethash.caches.get(epoch) + epoch, epochLength := calcEpoch(block) + currentI, futureI := ethash.caches.get(epoch, epochLength) current := currentI.(*cache) // Wait for generation finish. @@ -576,8 +580,8 @@ func (ethash *Ethash) cache(block uint64) *cache { // generates on a background thread. 
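Both generate paths above derive their size and seed from the first block of the item's own epoch, epoch*epochLength + 1. A small worked example of that arithmetic (epoch value assumed for illustration), which also shows why the length must be threaded through rather than taken from a constant:

package main

import "fmt"

// The same epoch number maps to different seed blocks before and after
// ECIP-1099, so cache.generate and dataset.generate need the item's own
// epoch length to compute it.
func main() {
	const epoch = uint64(191)
	fmt.Println(epoch*30000 + 1) // 5730001: epoch 191 under the legacy length
	fmt.Println(epoch*60000 + 1) // 11460001: epoch 191 under the doubled length
}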
func (ethash *Ethash) dataset(block uint64, async bool) *dataset { // Retrieve the requested ethash dataset - epoch := block / epochLength - currentI, futureI := ethash.datasets.get(epoch) + epoch, epochLength := calcEpoch(block) + currentI, futureI := ethash.datasets.get(epoch, epochLength) current := currentI.(*dataset) // If async is specified, generate everything in a background thread diff --git a/consensus/ethash/ethash_test.go b/consensus/ethash/ethash_test.go index fdfd81320f..c7c9472561 100644 --- a/consensus/ethash/ethash_test.go +++ b/consensus/ethash/ethash_test.go @@ -77,11 +77,10 @@ func TestCacheFileEvict(t *testing.T) { func verifyTest(wg *sync.WaitGroup, e *Ethash, workerIndex, epochs int) { defer wg.Done() - - const wiggle = 4 * epochLength + const wiggle = 4 * oldEpochLength r := rand.New(rand.NewSource(int64(workerIndex))) for epoch := 0; epoch < epochs; epoch++ { - block := int64(epoch)*epochLength - wiggle/2 + r.Int63n(wiggle) + block := int64(epoch)*oldEpochLength - wiggle/2 + r.Int63n(wiggle) if block < 0 { block = 0 } diff --git a/core/block_validator.go b/core/block_validator.go index 37bea2b246..8c3773695a 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params/types/ctypes" "github.com/ethereum/go-ethereum/params/vars" + "github.com/ethereum/go-ethereum/trie" ) // BlockValidator is responsible for validating block headers, uncles and @@ -62,7 +63,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash { return fmt.Errorf("uncle root hash mismatch: have %x, want %x", hash, header.UncleHash) } - if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash { + if hash := types.DeriveSha(block.Transactions(), new(trie.Trie)); hash != header.TxHash { return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash) } if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { @@ -90,7 +91,7 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom) } // Tre receipt Trie's root (R = (Tr [[H1, R1], ... 
[Hn, R1]])) - receiptSha := types.DeriveSha(receipts) + receiptSha := types.DeriveSha(receipts, new(trie.Trie)) if receiptSha != header.ReceiptHash { return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha) } diff --git a/core/blockchain.go b/core/blockchain.go index 59553e469b..f100c55131 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -70,8 +70,11 @@ var ( blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil) blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil) blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil) - blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil) - blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil) + + blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil) + blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil) + blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil) + blockReorgInvalidatedTx = metrics.NewRegisteredMeter("chain/reorg/invalidTx", nil) blockPrefetchExecuteTimer = metrics.NewRegisteredTimer("chain/prefetch/executes", nil) blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil) @@ -109,7 +112,10 @@ const ( // - Version 7 // The following incompatible database changes were added: // * Use freezer as the ancient database to maintain all ancient data - BlockChainVersion uint64 = 7 + // - Version 8 + // The following incompatible database changes were added: + // * New scheme for contract code in order to separate the codes and trie nodes + BlockChainVersion uint64 = 8 ) // CacheConfig contains the configuration values for the trie caching/pruning @@ -127,6 +133,16 @@ type CacheConfig struct { SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it } +// defaultCacheConfig are the default caching values if none are specified by the +// user (also used during testing). +var defaultCacheConfig = &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieTimeLimit: 5 * time.Minute, + SnapshotLimit: 256, + SnapshotWait: true, +} + // BlockChain represents the canonical chain given a database with a genesis // block. The Blockchain manages chain imports, reverts, chain reorganisations. // @@ -201,13 +217,7 @@ type BlockChain struct { // Processor. func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig ctypes.ChainConfigurator, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) { if cacheConfig == nil { - cacheConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 256, - SnapshotWait: true, - } + cacheConfig = defaultCacheConfig } bodyCache, _ := lru.New(bodyCacheLimit) bodyRLPCache, _ := lru.New(bodyCacheLimit) @@ -264,16 +274,23 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig ctyp if frozen > 0 { txIndexBlock = frozen } + // loadLastState and other steps below assume that CurrentBlock is not nil. + if bc.CurrentBlock() == nil { + bc.writeHeadBlock(bc.genesisBlock) + } } - if err := bc.loadLastState(); err != nil { return nil, err } - // The first thing the node will do is reconstruct the verification data for - // the head block (ethash cache or clique voting snapshot). Might as well do - // it in advance. 
- bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true) - + // Make sure the state associated with the block is available + head := bc.CurrentBlock() + if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil { + log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash()) + if err := bc.SetHead(head.NumberU64()); err != nil { + return nil, err + } + } + // Ensure that a previous crash in SetHead doesn't leave extra ancients if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 { var ( needRewind bool @@ -283,7 +300,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig ctyp // blockchain repair. If the head full block is even lower than the ancient // chain, truncate the ancient store. fullBlock := bc.CurrentBlock() - if fullBlock != nil && fullBlock != bc.genesisBlock && fullBlock.NumberU64() < frozen-1 { + if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 { needRewind = true low = fullBlock.NumberU64() } @@ -298,15 +315,17 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig ctyp } } if needRewind { - var hashes []common.Hash - previous := bc.CurrentHeader().Number.Uint64() - for i := low + 1; i <= bc.CurrentHeader().Number.Uint64(); i++ { - hashes = append(hashes, rawdb.ReadCanonicalHash(bc.db, i)) + log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low) + if err := bc.SetHead(low); err != nil { + return nil, err } - bc.Rollback(hashes) - log.Warn("Truncated ancient chain", "from", previous, "to", low) } } + // The first thing the node will do is reconstruct the verification data for + // the head block (ethash cache or clique voting snapshot). Might as well do + // it in advance. + bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true) + // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain for hash := range BadHashes { if header := bc.GetHeaderByHash(hash); header != nil { @@ -315,7 +334,9 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig ctyp // make sure the headerByNumber (if present) is in our current canonical chain if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) - bc.SetHead(header.Number.Uint64() - 1) + if err := bc.SetHead(header.Number.Uint64() - 1); err != nil { + return nil, err + } log.Error("Chain rewind was successful, resuming normal operation") } } @@ -357,8 +378,11 @@ func (bc *BlockChain) GetVMConfig() *vm.Config { // into node seamlessly. 
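Before the helpers that follow, a toy model of the repair behaviour that NewBlockChain above now delegates to SetHead: starting from the requested head, rewind parent-ward until a block whose state is present turns up. hasState is a stand-in for state.New succeeding, and block numbers stand in for real blocks; illustrative only:

package main

import "fmt"

func main() {
	hasState := map[uint64]bool{0: true, 97: true} // genesis plus one stateful block
	head := uint64(100)
	for !hasState[head] && head > 0 {
		head-- // one ParentHash hop in the real updateFn loop
	}
	fmt.Println("rewound to block", head) // rewound to block 97
}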
func (bc *BlockChain) empty() bool { genesis := bc.genesisBlock.Hash() - for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} { - if hash != genesis { + for _, hash := range []common.Hash{ + rawdb.ReadHeadBlockHash(bc.db), + rawdb.ReadHeadHeaderHash(bc.db), + rawdb.ReadHeadFastBlockHash(bc.db)} { + if hash != (common.Hash{}) && hash != genesis { return false } } @@ -382,15 +406,6 @@ func (bc *BlockChain) loadLastState() error { log.Warn("Head block missing, resetting chain", "hash", head) return bc.Reset() } - // Make sure the state associated with the block is available - if _, err := state.New(currentBlock.Root(), bc.stateCache, bc.snaps); err != nil { - // Dangling block without a state associated, init from scratch - log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash()) - if err := bc.repair(&currentBlock); err != nil { - return err - } - rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash()) - } // Everything seems to be fine, set as the head block bc.currentBlock.Store(currentBlock) headBlockGauge.Update(int64(currentBlock.NumberU64())) @@ -424,30 +439,48 @@ func (bc *BlockChain) loadLastState() error { log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0))) log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0))) log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0))) - + if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil { + log.Info("Loaded last fast-sync pivot marker", "number", *pivot) + } return nil } -// SetHead rewinds the local chain to a new head. In the case of headers, everything -// above the new head will be deleted and the new one set. In the case of blocks -// though, the head may be further rewound if block bodies are missing (non-archive -// nodes after a fast sync). +// SetHead rewinds the local chain to a new head. Depending on whether the node +// was fast synced or full synced and in which state, the method will try to +// delete minimal data from disk whilst retaining chain consistency. func (bc *BlockChain) SetHead(head uint64) error { - log.Warn("Rewinding blockchain", "target", head) - bc.chainmu.Lock() defer bc.chainmu.Unlock() - updateFn := func(db ethdb.KeyValueWriter, header *types.Header) { - // Rewind the block chain, ensuring we don't end up with a stateless head block - if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() < currentBlock.NumberU64() { + // Retrieve the last pivot block to short circuit rollbacks beyond it and the + // current freezer limit to start nuking if underflown + pivot := rawdb.ReadLastPivotNumber(bc.db) + frozen, _ := bc.db.Ancients() + + updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) { + // Rewind the block chain, ensuring we don't end up with a stateless head + // block. Note, depth equality is permitted to allow using SetHead as a + // chain reparation mechanism without deleting any data!
+ if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() { newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64()) if newHeadBlock == nil { + log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash()) newHeadBlock = bc.genesisBlock } else { - if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil { - // Rewound state missing, rolled back to before pivot, reset to genesis - newHeadBlock = bc.genesisBlock + // Block exists, keep rewinding until we find one with state + for { + if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil { + log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) + if pivot == nil || newHeadBlock.NumberU64() > *pivot { + newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) + continue + } else { + log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot) + newHeadBlock = bc.genesisBlock + } + } + log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) + break } } rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash()) @@ -459,7 +492,6 @@ func (bc *BlockChain) SetHead(head uint64) error { bc.currentBlock.Store(newHeadBlock) headBlockGauge.Update(int64(newHeadBlock.NumberU64())) } - // Rewind the fast block in a simpleton way to the target head if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() { newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64()) @@ -476,8 +508,17 @@ func (bc *BlockChain) SetHead(head uint64) error { bc.currentFastBlock.Store(newHeadFastBlock) headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64())) } - } + head := bc.CurrentBlock().NumberU64() + // If setHead underflowed the freezer threshold and the block processing + // intent afterwards is full block importing, delete the chain segment + // between the stateful block and the sethead target. + var wipe bool + if head+1 < frozen { + wipe = pivot == nil || head >= *pivot + } + return head, wipe // Only force wipe if full synced + } // Rewind the header chain, deleting all block bodies until then delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) { // Ignore the error here since light client won't hit this path @@ -485,10 +526,9 @@ func (bc *BlockChain) SetHead(head uint64) error { if num+1 <= frozen { // Truncate all relative data (header, total difficulty, body, receipt // and canonical hash) from ancient store. - if err := bc.db.TruncateAncients(num + 1); err != nil { + if err := bc.db.TruncateAncients(num); err != nil { log.Crit("Failed to truncate ancient data", "number", num, "err", err) } - // Remove the hash <-> number mapping from the active store.
rawdb.DeleteHeaderNumber(db, hash) } else { @@ -500,8 +540,18 @@ func (bc *BlockChain) SetHead(head uint64) error { } // Todo(rjl493456442) txlookup, bloombits, etc } - bc.hc.SetHead(head, updateFn, delFn) - + // If SetHead was only called as a chain reparation method, try to skip + // touching the header chain altogether, unless the freezer is broken + if block := bc.CurrentBlock(); block.NumberU64() == head { + if target, force := updateFn(bc.db, block.Header()); force { + bc.hc.SetHead(target, updateFn, delFn) + } + } else { + // Rewind the chain to the requested head and keep going backwards until a + // block with a state is found or the fast sync pivot is passed + log.Warn("Rewinding blockchain", "target", head) + bc.hc.SetHead(head, updateFn, delFn) + } // Clear out any stale content from the caches bc.bodyCache.Purge() bc.bodyRLPCache.Purge() @@ -624,28 +674,6 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { return nil } -// repair tries to repair the current blockchain by rolling back the current block -// until one with associated state is found. This is needed to fix incomplete db -// writes caused either by crashes/power outages, or simply non-committed tries. -// -// This method only rolls back the current block. The current header and current -// fast block are left intact. -func (bc *BlockChain) repair(head **types.Block) error { - for { - // Abort if we've rewound to a head block that does have associated state - if _, err := state.New((*head).Root(), bc.stateCache, bc.snaps); err == nil { - log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash()) - return nil - } - // Otherwise rewind one block and recheck state availability there - block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1) - if block == nil { - return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash()) - } - *head = block - } -} - // Export writes the active chain to the given writer. func (bc *BlockChain) Export(w io.Writer) error { return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64()) @@ -877,12 +905,30 @@ func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types. return uncles } -// TrieNode retrieves a blob of data associated with a trie node (or code hash) +// TrieNode retrieves a blob of data associated with a trie node // either from ephemeral in-memory cache, or from persistent storage. func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { return bc.stateCache.TrieDB().Node(hash) } +// ContractCode retrieves a blob of data associated with a contract hash +// either from ephemeral in-memory cache, or from persistent storage. +func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) { + return bc.stateCache.ContractCode(common.Hash{}, hash) +} + +// ContractCodeWithPrefix retrieves a blob of data associated with a contract +// hash either from ephemeral in-memory cache, or from persistent storage. +// +// If the code doesn't exist in the in-memory cache, check the storage with the +// new code scheme. +func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) { + type codeReader interface { + ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error) + } + return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash) +} +
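As an editorial aside, not part of the patch: the two accessors above expose the legacy and the prefixed code schemes side by side, so a caller that does not know which scheme a given database uses could chain them. A minimal sketch, assuming a hypothetical helper name readCode:

func readCode(bc *BlockChain, codeHash common.Hash) ([]byte, error) {
	// Try the new prefixed code scheme first...
	if code, err := bc.ContractCodeWithPrefix(codeHash); err == nil && len(code) > 0 {
		return code, nil
	}
	// ...then fall back to the legacy lookup keyed by code hash alone.
	return bc.ContractCode(codeHash)
}

 // Stop stops the blockchain service. If any imports are currently in progress // it will abort them using the procInterrupt.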
func (bc *BlockChain) Stop() { @@ -982,52 +1028,6 @@ const ( SideStatTy ) -// Rollback is designed to remove a chain of links from the database that aren't -// certain enough to be valid. -func (bc *BlockChain) Rollback(chain []common.Hash) { - bc.chainmu.Lock() - defer bc.chainmu.Unlock() - - batch := bc.db.NewBatch() - for i := len(chain) - 1; i >= 0; i-- { - hash := chain[i] - - // Degrade the chain markers if they are explicitly reverted. - // In theory we should update all in-memory markers in the - // last step, however the direction of rollback is from high - // to low, so it's safe the update in-memory markers directly. - currentHeader := bc.hc.CurrentHeader() - if currentHeader.Hash() == hash { - newHeadHeader := bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1) - rawdb.WriteHeadHeaderHash(batch, currentHeader.ParentHash) - bc.hc.SetCurrentHeader(newHeadHeader) - } - if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash { - newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1) - rawdb.WriteHeadFastBlockHash(batch, currentFastBlock.ParentHash()) - bc.currentFastBlock.Store(newFastBlock) - headFastBlockGauge.Update(int64(newFastBlock.NumberU64())) - } - if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash { - newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1) - rawdb.WriteHeadBlockHash(batch, currentBlock.ParentHash()) - bc.currentBlock.Store(newBlock) - headBlockGauge.Update(int64(newBlock.NumberU64())) - } - } - if err := batch.Write(); err != nil { - log.Crit("Failed to rollback chain markers", "err", err) - } - // Truncate ancient data which exceeds the current header. - // - // Notably, it can happen that system crashes without truncating the ancient data - // but the head indicator has been updated in the active store. Regarding this issue, - // system will self recovery by truncating the extra data during the setup phase. - if err := bc.truncateAncient(bc.hc.CurrentHeader().Number.Uint64()); err != nil { - log.Crit("Truncate ancient store failed", "err", err) - } -} - // truncateAncient rewinds the blockchain to the specified header and deletes all // data in the ancient store that exceeds the specified header. func (bc *BlockChain) truncateAncient(head uint64) error { @@ -1284,6 +1284,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ } // writeLive writes blockchain and corresponding receipt chain into active store. writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { + skipPresenceCheck := false batch := bc.db.NewBatch() for i, block := range blockChain { // Short circuit insertion if shutting down or processing failed @@ -1294,9 +1295,17 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ if !bc.HasHeader(block.Hash(), block.NumberU64()) { return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) } - if bc.HasBlock(block.Hash(), block.NumberU64()) { - stats.ignored++ - continue + if !skipPresenceCheck { + // Ignore if the entire data is already known + if bc.HasBlock(block.Hash(), block.NumberU64()) { + stats.ignored++ + continue + } else { + // If block N is not present, neither are the later blocks. 
+ // This should be true, but if we are mistaken, the shortcut + // here will only cause overwriting of some existing data + skipPresenceCheck = true + } } // Write all the data out into the database rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()) @@ -2159,6 +2168,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) blockReorgAddMeter.Mark(int64(len(newChain))) blockReorgDropMeter.Mark(int64(len(oldChain))) + blockReorgMeter.Mark(1) } else { log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) return fmt.Errorf("impossible reorg") diff --git a/core/blockchain_remotefreezer_test.go b/core/blockchain_remotefreezer_test.go index de3231cc82..a255e81a0d 100644 --- a/core/blockchain_remotefreezer_test.go +++ b/core/blockchain_remotefreezer_test.go @@ -36,6 +36,7 @@ import ( "github.com/ethereum/go-ethereum/params/types/genesisT" "github.com/ethereum/go-ethereum/params/vars" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" ) // Tests in this file duplicate select tests from blockchain_test.go, @@ -189,18 +190,16 @@ func TestFastVsFullChains_RemoteFreezer(t *testing.T) { } // Test a rollback, causing the ancient store to use the TruncateAncient method. - pinch := len(blocks) / 4 - rollbackHeaders := []common.Hash{} - for _, v := range headers[pinch:] { - rollbackHeaders = append(rollbackHeaders, v.Hash()) + if err := ancient.SetHead(0); err != nil { + t.Fatalf("set head err: %v", err) } - ancient.Rollback(rollbackHeaders) // Reinsert the rolled-back headers and receipts. 
- if n, err := ancient.InsertHeaderChain(headers[pinch:], 1); err != nil { - t.Fatalf("failed to insert header %d: %v", n, err) + if n, err := ancient.InsertHeaderChain(headers, 1); err != nil { + t.Log(ancient.CurrentHeader().Number.Uint64()) + t.Fatalf("failed to insert header %d (#%d): %v", n, headers[n].Number.Uint64(), err) } - if n, err := ancient.InsertReceiptChain(blocks[pinch:], receipts, ancientLimit); err != nil { + if n, err := ancient.InsertReceiptChain(blocks, receipts, ancientLimit); err != nil { t.Fatalf("failed to insert receipt %d: %v", n, err) } @@ -232,12 +231,12 @@ func TestFastVsFullChains_RemoteFreezer(t *testing.T) { } if fblock, arblock, anblock := fast.GetBlockByHash(hash), archive.GetBlockByHash(hash), ancient.GetBlockByHash(hash); fblock.Hash() != arblock.Hash() || anblock.Hash() != arblock.Hash() { t.Errorf("block #%d [%x]: block mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock, anblock, arblock) - } else if types.DeriveSha(fblock.Transactions()) != types.DeriveSha(arblock.Transactions()) || types.DeriveSha(anblock.Transactions()) != types.DeriveSha(arblock.Transactions()) { + } else if types.DeriveSha(fblock.Transactions(), new(trie.Trie)) != types.DeriveSha(arblock.Transactions(), new(trie.Trie)) || types.DeriveSha(anblock.Transactions(), new(trie.Trie)) != types.DeriveSha(arblock.Transactions(), new(trie.Trie)) { t.Errorf("block #%d [%x]: transactions mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Transactions(), anblock.Transactions(), arblock.Transactions()) } else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) { t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock, arblock.Uncles()) } - if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) { + if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts, new(trie.Trie)) != types.DeriveSha(areceipts, new(trie.Trie)) { t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts) } } @@ -252,77 +251,6 @@ func TestFastVsFullChains_RemoteFreezer(t *testing.T) { } } -func TestBlockchainRecovery_RemoteFreezer(t *testing.T) { - // Configure and generate a sample block chain - var ( - gendb = rawdb.NewMemoryDatabase() - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000) - gspec = &genesisT.Genesis{Config: params.TestChainConfig, Alloc: genesisT.GenesisAlloc{address: {Balance: funds}}} - genesis = MustCommitGenesis(gendb, gspec) - ) - height := uint64(1024) - blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), nil) - - // Import the chain as a ancient-first node and ensure 
all pointers are updated - // Freezer style fast import the chain. - freezerRPCEndpoint, server, ancientDb := testRPCRemoteFreezer(t) - if n, err := ancientDb.Ancients(); err != nil { - t.Fatalf("ancients: %v", err) - } else if n != 0 { - t.Logf("truncating pre-existing ancients from: %d (truncating to 0)", n) - err = ancientDb.TruncateAncients(0) - if err != nil { - t.Fatalf("truncate ancients: %v", err) - } - } - if server != nil { - defer os.RemoveAll(filepath.Dir(freezerRPCEndpoint)) - defer server.Stop() - } - defer ancientDb.Close() // Cause the Close method to be called. - defer func() { - // A deferred truncation to 0 will allow a single freezer instance to - // handle multiple tests in serial. - if err := ancientDb.TruncateAncients(0); err != nil { - t.Fatalf("deferred truncate ancients error: %v", err) - } - }() - - MustCommitGenesis(ancientDb, gspec) - ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) - - headers := make([]*types.Header, len(blocks)) - for i, block := range blocks { - headers[i] = block.Header() - } - if n, err := ancient.InsertHeaderChain(headers, 1); err != nil { - t.Fatalf("failed to insert header %d: %v", n, err) - } - if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil { - t.Fatalf("failed to insert receipt %d: %v", n, err) - } - ancient.Stop() - - // Destroy head fast block manually - midBlock := blocks[len(blocks)/2] - rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash()) - - // Reopen broken blockchain again - ancient, _ = NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) - defer ancient.Stop() - if num := ancient.CurrentBlock().NumberU64(); num != 0 { - t.Errorf("head block mismatch: have #%v, want #%v", num, 0) - } - if num := ancient.CurrentFastBlock().NumberU64(); num != midBlock.NumberU64() { - t.Errorf("head fast-block mismatch: have #%v, want #%v", num, midBlock.NumberU64()) - } - if num := ancient.CurrentHeader().Number.Uint64(); num != midBlock.NumberU64() { - t.Errorf("head header mismatch: have #%v, want #%v", num, midBlock.NumberU64()) - } -} - func TestIncompleteAncientReceiptChainInsertion_RemoteFreezer(t *testing.T) { // Configure and generate a sample block chain var ( diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go new file mode 100644 index 0000000000..96d71d978e --- /dev/null +++ b/core/blockchain_repair_test.go @@ -0,0 +1,1655 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +// Tests that abnormal program termination (i.e. crash) and restart doesn't leave +// the database in some strange state with gaps in the chain, nor with block data +// dangling in the future.
+ +package core + +import ( + "io/ioutil" + "math/big" + "os" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/params/types/genesisT" +) + +// Tests a recovery for a short canonical chain where a recent block was already +// committed to disk and then the process crashed. In this case we expect the full +// chain to be rolled back to the committed block, but the chain data itself left +// in the database for replaying. +func TestShortRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // + // Frozen: none + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 8, + expSidechainBlocks: 0, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain where the fast sync pivot point was +// already committed, after which the process crashed. In this case we expect the full +// chain to be rolled back to the committed block, but the chain data itself left in +// the database for replaying. +func TestShortFastSyncedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 0, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain where the fast sync pivot point was +// not yet committed, but the process crashed. In this case we expect the chain to +// detect that it was fast syncing and not delete anything, since we can just pick +// up directly where we left off. +func TestShortFastSyncingRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 0, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a short canonical chain and a shorter side chain, where a +// recent block was already committed to disk and then the process crashed. In this +// test scenario the side chain is below the committed block. 
In this case we expect +// the canonical chain to be rolled back to the committed block, but the chain data +// itself left in the database for replaying. +func TestShortOldForkedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3 + // + // Frozen: none + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 8, + expSidechainBlocks: 3, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was already committed to disk and then the process +// crashed. In this test scenario the side chain is below the committed block. In +// this case we expect the canonical chain to be rolled back to the committed block, +// but the chain data itself left in the database for replaying. +func TestShortOldForkedFastSyncedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3 + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 3, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was not yet committed, but the process crashed. In this +// test scenario the side chain is below the committed block. In this case we expect +// the chain to detect that it was fast syncing and not delete anything, since we +// can just pick up directly where we left off. +func TestShortOldForkedFastSyncingRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3 + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 3, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a short canonical chain and a shorter side chain, where a +// recent block was already committed to disk and then the process crashed. In this +// test scenario the side chain reaches above the committed block. In this case we +// expect the canonical chain to be rolled back to the committed block, but the +// chain data itself left in the database for replaying. 
+func TestShortNewlyForkedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6 + // + // Frozen: none + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3->S4->S5->S6 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 6, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 8, + expSidechainBlocks: 6, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was already committed to disk and then the process +// crashed. In this test scenario the side chain reaches above the committed block. +// In this case we expect the canonical chain to be rolled back to the committed +// block, but the chain data itself left in the database for replaying. +func TestShortNewlyForkedFastSyncedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6 + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3->S4->S5->S6 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 6, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 6, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was not yet committed, but the process crashed. In +// this test scenario the side chain reaches above the committed block. In this +// case we expect the chain to detect that it was fast syncing and not delete +// anything, since we can just pick up directly where we left off. +func TestShortNewlyForkedFastSyncingRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6 + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3->S4->S5->S6 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 6, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 6, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a short canonical chain and a longer side chain, where a +// recent block was already committed to disk and then the process crashed. In this +// case we expect the canonical chain to be rolled back to the committed block, but +// the chain data itself left in the database for replaying. 
+func TestShortReorgedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Frozen: none + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 10, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 8, + expSidechainBlocks: 10, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain and a longer side chain, where +// the fast sync pivot point was already committed to disk and then the process +// crashed. In this case we expect the canonical chain to be rolled back to the +// committed block, but the chain data itself left in the database for replaying. +func TestShortReorgedFastSyncedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 10, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 10, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain and a longer side chain, where +// the fast sync pivot point was not yet committed, but the process crashed. In +// this case we expect the chain to detect that it was fast syncing and not delete +// anything, since we can just pick up directly where we left off. +func TestShortReorgedFastSyncingRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 10, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 10, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + }) +}
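For reference while reading the long-chain battery below: the rewindTest fixture these functions fill in is defined elsewhere in this change. Judging from the literals used throughout, it is presumably shaped roughly as follows (field types inferred, comments ours):

type rewindTest struct {
	canonicalBlocks    int     // Number of blocks to generate for the canonical chain
	sidechainBlocks    int     // Number of blocks to generate for the side chain
	freezeThreshold    uint64  // Block number until which to move things into the freezer
	commitBlock        uint64  // Block number for which to commit the state to disk
	pivotBlock         *uint64 // Pivot block number in case of fast sync

	expCanonicalBlocks int    // Number of canonical blocks expected to remain in the database
	expSidechainBlocks int    // Number of sidechain blocks expected to remain in the database
	expFrozen          int    // Number of canonical blocks expected to be in the freezer
	expHeadHeader      uint64 // Block number of the expected head header
	expHeadFastBlock   uint64 // Block number of the expected head fast block
	expHeadBlock       uint64 // Block number of the expected head block
}

// uint64ptr is likewise assumed to be the trivial pointer helper:
func uint64ptr(n uint64) *uint64 { return &n }

+ +// Tests a recovery for a long canonical chain with frozen blocks where a recent +// block - newer than the ancient limit - was already committed to disk and then +// the process crashed. In this case we expect the chain to be rolled back to the +// committed block, with everything afterwards kept as fast sync data.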
+func TestLongShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks where a recent +// block - older than the ancient limit - was already committed to disk and then +// the process crashed. In this case we expect the chain to be rolled back to the +// committed block, with everything afterwards deleted. +func TestLongDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks where the fast +// sync pivot point - newer than the ancient limit - was already committed, after +// which the process crashed. In this case we expect the chain to be rolled back +// to the committed block, with everything afterwards kept as fast sync data. +func TestLongFastSyncedShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +}
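A note on the freezer arithmetic in these fixtures: with freezeThreshold: 16, a crash leaves canonicalBlocks - 16 + 1 items in the ancient store counting genesis (18 blocks give expFrozen: 3, i.e. G, C1, C2; 24 blocks give 9, i.e. G..C8). In the deep cases the repair then truncates the ancients back to the committed block, so expFrozen becomes commitBlock + 1 = 5 (G..C4). That truncation is gated by the wipe decision in the SetHead rewrite earlier in this diff, roughly as follows (a hypothetical standalone helper mirroring updateFn, for illustration only):

func needsWipe(head, frozen uint64, pivot *uint64) bool {
	// Only wipe if the new head sank below the freezer boundary and the
	// node is full syncing: no pivot recorded, or the head is at/above it.
	if head+1 >= frozen {
		return false
	}
	return pivot == nil || head >= *pivot
}

+ +// Tests a recovery for a long canonical chain with frozen blocks where the fast +// sync pivot point - older than the ancient limit - was already committed, after +// which the process crashed. In this case we expect the chain to be rolled back +// to the committed block, with everything afterwards deleted.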
+func TestLongFastSyncedDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks where the fast +// sync pivot point - newer than the ancient limit - was not yet committed, but the +// process crashed. In this case we expect the chain to detect that it was fast +// syncing and not delete anything, since we can just pick up directly where we +// left off. +func TestLongFastSyncingShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // + // Frozen: + // G->C1->C2 + // + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks where the fast +// sync pivot point - older than the ancient limit - was not yet committed, but the +// process crashed. In this case we expect the chain to detect that it was fast +// syncing and not delete anything, since we can just pick up directly where we +// left off. +func TestLongFastSyncingDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Expected in leveldb: + // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 + // + // Expected head header : C24 + // Expected head fast block: C24 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 24, + expSidechainBlocks: 0, + expFrozen: 9, + expHeadHeader: 24, + expHeadFastBlock: 24, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where a recent block - newer than the ancient limit - was already +// committed to disk and then the process crashed. In this test scenario the side +// chain is below the committed block.
In this case we expect the chain to be +// rolled back to the committed block, with everything afterwards kept as fast +// sync data; the side chain completely nuked by the freezer. +func TestLongOldForkedShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where a recent block - older than the ancient limit - was already +// committed to disk and then the process crashed. In this test scenario the side +// chain is below the committed block. In this case we expect the canonical chain +// to be rolled back to the committed block, with everything afterwards deleted; +// the side chain completely nuked by the freezer. +func TestLongOldForkedDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was already committed to disk and then the process crashed. In this test scenario +// the side chain is below the committed block. In this case we expect the chain +// to be rolled back to the committed block, with everything afterwards kept as +// fast sync data; the side chain completely nuked by the freezer.
+func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - older than the ancient limit - +// was already committed to disk and then the process crashed. In this test scenario +// the side chain is below the committed block. In this case we expect the canonical +// chain to be rolled back to the committed block, with everything afterwards deleted; +// the side chain completely nuked by the freezer. +func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was not yet committed, but the process crashed. In this test scenario the side +// chain is below the committed block. In this case we expect the chain to detect +// that it was fast syncing and not delete anything. The side chain is completely +// nuked by the freezer.
+func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2 + // + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - older than the ancient limit - +// was not yet committed, but the process crashed. In this test scenario the side +// chain is below the committed block. In this case we expect the chain to detect +// that it was fast syncing and not delete anything. The side chain is completely +// nuked by the freezer. +func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Expected in leveldb: + // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 + // + // Expected head header : C24 + // Expected head fast block: C24 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 24, + expSidechainBlocks: 0, + expFrozen: 9, + expHeadHeader: 24, + expHeadFastBlock: 24, + expHeadBlock: 0, + }) +}
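The fast-syncing variants encode one rule from the new SetHead logic: while hunting for a block with state, the rewind may step back parent by parent only while it stays above the pivot; the moment it would cross an uncommitted pivot it snaps to genesis so fast sync can resume. A minimal sketch of that decision, as a hypothetical helper for illustration only:

func rewindTarget(pivot *uint64, candidate uint64) uint64 {
	if pivot == nil || candidate > *pivot {
		return candidate // Still above the pivot: safe to keep rewinding here
	}
	return 0 // Would cross the pivot: aim for genesis instead
}

+ +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where a recent block - newer than the ancient limit - was already +// committed to disk and then the process crashed. In this test scenario the side +// chain is above the committed block. In this case we expect the chain to be +// rolled back to the committed block, with everything afterwards kept as fast +// sync data; the side chain completely nuked by the freezer.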
+func TestLongNewerForkedShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where a recent block - older than the ancient limit - was already +// committed to disk and then the process crashed. In this test scenario the side +// chain is above the committed block. In this case we expect the canonical chain +// to be rolled back to the committed block, with everything afterwards deleted; +// the side chain completely nuked by the freezer. +func TestLongNewerForkedDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was already committed to disk and then the process crashed. In this test scenario +// the side chain is above the committed block. In this case we expect the chain +// to be rolled back to the committed block, with everything afterwards kept as fast +// sync data; the side chain completely nuked by the freezer.
+func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - older than the ancient limit - +// was already committed to disk and then the process crashed. In this test scenario +// the side chain is above the committed block. In this case we expect the canonical +// chain to be rolled back to the committed block, with everything afterwards deleted; +// the side chain completely nuked by the freezer. +func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was not yet committed, but the process crashed. In this test scenario the side +// chain is above the committed block. In this case we expect the chain to detect +// that it was fast syncing and not delete anything. The side chain is completely +// nuked by the freezer.
+func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2 + // + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - older than the ancient limit - +// was not yet committed, but the process crashed. In this test scenario the side +// chain is above the committed block. In this case we expect the chain to detect +// that it was fast syncing and not delete anything. The side chain is completely +// nuked by the freezer. +func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Expected in leveldb: + // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 + // + // Expected head header : C24 + // Expected head fast block: C24 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 24, + expSidechainBlocks: 0, + expFrozen: 9, + expHeadHeader: 24, + expHeadFastBlock: 24, + expHeadBlock: 0, + }) +}
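A word on the recurring "nuked by the freezer" expectation: the freezer only ever migrates canonical data, and once a block range has moved into the ancient store the corresponding key-value entries below that boundary, side-chain blocks included, are deleted wholesale; that is why every long-chain fixture here expects expSidechainBlocks: 0 no matter how long the side chain was.

+ +// Tests a recovery for a long canonical chain with frozen blocks and a longer side +// chain, where a recent block - newer than the ancient limit - was already committed +// to disk and then the process crashed. In this case we expect the chain to be +// rolled back to the committed block, with everything afterwards kept as fast sync +// data. The side chain is completely nuked by the freezer.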
+func TestLongReorgedShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a longer side +// chain, where a recent block - older than the ancient limit - was already committed +// to disk and then the process crashed. In this case we expect the canonical chain +// to be rolled back to the committed block, with everything afterwards deleted. The +// side chain is completely nuked by the freezer. +func TestLongReorgedDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was already committed to disk and then the process crashed. In this case we +// expect the chain to be rolled back to the committed block, with everything +// afterwards kept as fast sync data. The side chain is completely nuked by the +// freezer.
+func TestLongReorgedFastSyncedShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - older than the ancient limit - +// was already committed to disk and then the process crashed. In this case we +// expect the canonical chains to be rolled back to the committed block, with +// everything afterwads deleted. The side chain completely nuked by the freezer. +func TestLongReorgedFastSyncedDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was not yet committed, but the process crashed. In this case we expect the +// chain to detect that it was fast syncing and not delete anything, since we +// can just pick up directly where we left off. 
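+//
+// (All of these scenarios hand the pivot to the test table as a *uint64. A
+// minimal sketch of the pointer helper they assume - hypothetical here if it
+// is not declared elsewhere in this file - would be:
+//
+//	func uint64ptr(n uint64) *uint64 {
+//		return &n // take the address of the local copy of the literal
+//	}
+//
+// which lets a literal block number like 4 be passed by address inline.)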
+func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a longer
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but the process crashed. In this case we expect the
+// chain to detect that it was fast syncing and not delete anything, since we
+// can just pick up directly where we left off.
+func TestLongReorgedFastSyncingDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected in leveldb:
+	//   C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
+	//
+	// Expected head header    : C24
+	// Expected head fast block: C24
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    26,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 24,
+		expSidechainBlocks: 0,
+		expFrozen:          9,
+		expHeadHeader:      24,
+		expHeadFastBlock:   24,
+		expHeadBlock:       0,
+	})
+}
+
+func testRepair(t *testing.T, tt *rewindTest) {
+	// It's hard to follow the test case; visualize the input
+	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+	//fmt.Println(tt.dump(true))
+
+	// Create a temporary persistent database
+	datadir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatalf("Failed to create temporary datadir: %v", err)
+	}
+	os.RemoveAll(datadir)
+
+	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+	if err != nil {
+		t.Fatalf("Failed to create persistent database: %v", err)
+	}
+	defer db.Close() // Might double close, should be fine
+
+	// Initialize a fresh chain
+	var (
+		genesis = MustCommitGenesis(db, new(genesisT.Genesis))
+		engine  = ethash.NewFullFaker()
+	)
+
+	chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("Failed to create chain: %v", err)
+	}
+	// If sidechain blocks are needed, make a light chain and import it
+	var sideblocks types.Blocks
+	if tt.sidechainBlocks > 0 {
+		sideblocks, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) {
+			b.SetCoinbase(common.Address{0x01})
+		})
+		if _, err := chain.InsertChain(sideblocks); err != nil {
+			t.Fatalf("Failed to import side chain: %v", err)
+		}
+	}
+	canonblocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) {
+		b.SetCoinbase(common.Address{0x02})
+		b.SetDifficulty(big.NewInt(1000000))
+	})
+	if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil {
+		t.Fatalf("Failed to import canonical chain start: %v", err)
+	}
+	if tt.commitBlock > 0 {
+		chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+	}
+	if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
+		t.Fatalf("Failed to import canonical chain tail: %v", err)
+	}
+	// Force run a freeze cycle
+	type freezer interface {
+		Freeze(threshold uint64)
+		Ancients() (uint64, error)
+	}
+	db.(freezer).Freeze(tt.freezeThreshold)
+
+	// Set the simulated pivot block
+	if tt.pivotBlock != nil {
+		rawdb.WriteLastPivotNumber(db, *tt.pivotBlock)
+	}
+	// Pull the plug on the database, simulating a hard crash
+	db.Close()
+
+	// Start a new blockchain back up and see where the repair leads us
+	db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+	if err != nil {
+		t.Fatalf("Failed to reopen persistent database: %v", err)
+	}
+	defer db.Close()
+
+	chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("Failed to recreate chain: %v", err)
+	}
+	defer chain.Stop()
+
+	// Iterate over all the remaining blocks and ensure there are no gaps
+	verifyNoGaps(t, chain, true, canonblocks)
+	verifyNoGaps(t, chain, false, sideblocks)
+	verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
+	verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
+
+	if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
+		t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
+	}
+	if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
+		t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
+	}
+	if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
+		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
+	}
+	if frozen, err := db.(freezer).Ancients(); err != nil {
+		t.Errorf("Failed to retrieve ancient count: %v\n", err)
+	} else if int(frozen) != tt.expFrozen {
+		t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
+	}
+}
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
new file mode 100644
index 0000000000..b9b4cdc833
--- /dev/null
+++ b/core/blockchain_sethead_test.go
@@ -0,0 +1,1950 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Tests that setting the chain head backwards doesn't leave the database in some
+// strange state with gaps in the chain, nor with block data dangling in the future.
+
+package core
+
+import (
+	"fmt"
+	"io/ioutil"
+	"math/big"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/params/types/genesisT"
+)
+
+// rewindTest is a test case for chain rollback upon user request.
+type rewindTest struct {
+	canonicalBlocks int     // Number of blocks to generate for the canonical chain (heavier)
+	sidechainBlocks int     // Number of blocks to generate for the side chain (lighter)
+	freezeThreshold uint64  // Block number until which to move things into the freezer
+	commitBlock     uint64  // Block number for which to commit the state to disk
+	pivotBlock      *uint64 // Pivot block number in case of fast sync
+
+	setheadBlock       uint64 // Block number to set head back to
+	expCanonicalBlocks int    // Number of canonical blocks expected to remain in the database (excl. genesis)
+	expSidechainBlocks int    // Number of sidechain blocks expected to remain in the database (excl. genesis)
+	expFrozen          int    // Number of canonical blocks expected to be in the freezer (incl. genesis)
+	expHeadHeader      uint64 // Block number of the expected head header
+	expHeadFastBlock   uint64 // Block number of the expected head fast sync block
+	expHeadBlock       uint64 // Block number of the expected head full block
+}
+
+func (tt *rewindTest) dump(crash bool) string {
+	buffer := new(strings.Builder)
+
+	fmt.Fprint(buffer, "Chain:\n   G")
+	for i := 0; i < tt.canonicalBlocks; i++ {
+		fmt.Fprintf(buffer, "->C%d", i+1)
+	}
+	fmt.Fprint(buffer, " (HEAD)\n")
+	if tt.sidechainBlocks > 0 {
+		fmt.Fprintf(buffer, "   └")
+		for i := 0; i < tt.sidechainBlocks; i++ {
+			fmt.Fprintf(buffer, "->S%d", i+1)
+		}
+		fmt.Fprintf(buffer, "\n")
+	}
+	fmt.Fprintf(buffer, "\n")
+
+	if tt.canonicalBlocks > int(tt.freezeThreshold) {
+		fmt.Fprint(buffer, "Frozen:\n   G")
+		for i := 0; i < tt.canonicalBlocks-int(tt.freezeThreshold); i++ {
+			fmt.Fprintf(buffer, "->C%d", i+1)
+		}
+		fmt.Fprintf(buffer, "\n\n")
+	} else {
+		fmt.Fprintf(buffer, "Frozen: none\n")
+	}
+	fmt.Fprintf(buffer, "Commit: G")
+	if tt.commitBlock > 0 {
+		fmt.Fprintf(buffer, ", C%d", tt.commitBlock)
+	}
+	fmt.Fprint(buffer, "\n")
+
+	if tt.pivotBlock == nil {
+		fmt.Fprintf(buffer, "Pivot : none\n")
+	} else {
+		fmt.Fprintf(buffer, "Pivot : C%d\n", *tt.pivotBlock)
+	}
+	if crash {
+		fmt.Fprintf(buffer, "\nCRASH\n\n")
+	} else {
+		fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", tt.setheadBlock)
+	}
+	fmt.Fprintf(buffer, "------------------------------\n\n")
+
+	if tt.expFrozen > 0 {
+		fmt.Fprint(buffer, "Expected in freezer:\n   G")
+		for i := 0; i < tt.expFrozen-1; i++ {
+			fmt.Fprintf(buffer, "->C%d", i+1)
+		}
+		fmt.Fprintf(buffer, "\n\n")
+	}
+	if tt.expFrozen > 0 {
+		if tt.expFrozen >= tt.expCanonicalBlocks {
+			fmt.Fprintf(buffer, "Expected in leveldb: none\n")
+		} else {
+			fmt.Fprintf(buffer, "Expected in leveldb:\n   C%d)", tt.expFrozen-1)
+			for i := tt.expFrozen - 1; i < tt.expCanonicalBlocks; i++ {
+				fmt.Fprintf(buffer, "->C%d", i+1)
+			}
+			fmt.Fprint(buffer, "\n")
+			if tt.expSidechainBlocks > tt.expFrozen {
+				fmt.Fprintf(buffer, "   └")
+				for i := tt.expFrozen - 1; i < tt.expSidechainBlocks; i++ {
+					fmt.Fprintf(buffer, "->S%d", i+1)
+				}
+				fmt.Fprintf(buffer, "\n")
+			}
+		}
+	} else {
+		fmt.Fprint(buffer, "Expected in leveldb:\n   G")
+		for i := tt.expFrozen; i < tt.expCanonicalBlocks; i++ {
+			fmt.Fprintf(buffer, "->C%d", i+1)
+		}
+		fmt.Fprint(buffer, "\n")
+		if tt.expSidechainBlocks > tt.expFrozen {
+			fmt.Fprintf(buffer, "   └")
+			for i := tt.expFrozen; i < tt.expSidechainBlocks; i++ {
+				fmt.Fprintf(buffer, "->S%d", i+1)
+			}
+			fmt.Fprintf(buffer, "\n")
+		}
+	}
+	fmt.Fprintf(buffer, "\n")
+	fmt.Fprintf(buffer, "Expected head header    : C%d\n", tt.expHeadHeader)
+	fmt.Fprintf(buffer, "Expected head fast block: C%d\n", tt.expHeadFastBlock)
+	if tt.expHeadBlock == 0 {
+		fmt.Fprintf(buffer, "Expected head block     : G\n")
+	} else {
+		fmt.Fprintf(buffer, "Expected head block     : C%d\n", tt.expHeadBlock)
+	}
+	return buffer.String()
+}
+
+// Tests a sethead for a short canonical chain where a recent block was already
+// committed to disk and then sethead was called. In this case we expect the full
+// chain to be rolled back to the committed block. Everything above the sethead
+// point should be deleted. In between the committed block and the requested head
+// the data can remain as "fast sync" data to avoid redownloading it.
+func TestShortSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 0,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a short canonical chain where the fast sync pivot point was
+// already committed, after which sethead was called. In this case we expect the
+// chain to behave like in full sync mode, rolling back to the committed block.
+// Everything above the sethead point should be deleted. In between the committed
+// block and the requested head the data can remain as "fast sync" data to avoid
+// redownloading it.
+func TestShortFastSyncedSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 0,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a short canonical chain where the fast sync pivot point was
+// not yet committed, but sethead was called. In this case we expect the chain to
+// detect that it was fast syncing and delete everything from the new head, since
+// we can just pick up fast syncing from there.
The head full block should be set +// to the genesis. +func TestShortFastSyncingSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 0, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a short canonical chain and a shorter side chain, where a +// recent block was already committed to disk and then sethead was called. In this +// test scenario the side chain is below the committed block. In this case we expect +// the canonical full chain to be rolled back to the committed block. Everything +// above the sethead point should be deleted. In between the committed block and +// the requested head the data can remain as "fast sync" data to avoid redownloading +// it. The side chain should be left alone as it was shorter. +func TestShortOldForkedSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3 + // + // Frozen: none + // Commit: G, C4 + // Pivot : none + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 3, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was already committed to disk and then sethead was +// called. In this test scenario the side chain is below the committed block. In +// this case we expect the canonical full chain to be rolled back to the committed +// block. Everything above the sethead point should be deleted. In between the +// committed block and the requested head the data can remain as "fast sync" data +// to avoid redownloading it. The side chain should be left alone as it was shorter. +func TestShortOldForkedFastSyncedSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3 + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 3, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was not yet committed, but sethead was called. 
In this
+// test scenario the side chain is below the committed block. In this case we expect
+// the chain to detect that it was fast syncing and delete everything from the new
+// head, since we can just pick up fast syncing from there. The head full block
+// should be set to the genesis.
+func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen: none
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//   └->S1->S2->S3
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 3,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then sethead was called. In this
+// test scenario the side chain reaches above the committed block. In this case we
+// expect the canonical full chain to be rolled back to the committed block. All
+// data above the sethead point should be deleted. In between the committed block
+// and the requested head the data can remain as "fast sync" data to avoid having
+// to redownload it. The side chain should be truncated to the head set.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortNewlyForkedSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//   └->S1->S2->S3->S4->S5->S6->S7
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    10,
+		sidechainBlocks:    8,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 7,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then sethead was
+// called. In this test scenario the side chain reaches above the committed block.
+// In this case we expect the canonical full chain to be rolled back to the
+// committed block. All data above the sethead point should be deleted. In
+// between the committed block and the requested head the data can remain as
+// "fast sync" data to avoid having to redownload it. The side chain should be
+// truncated to the head set.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
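+//
+// (As everywhere in this file, the before/after layout drawn below can be
+// regenerated from the test table itself; re-enabling the commented-out debug
+// lines at the top of testSetHead prints it, e.g.:
+//
+//	fmt.Println(tt.dump(false))
+//
+// so the ASCII art and the rewindTest fields can be cross-checked.)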
+func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8 + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3->S4->S5->S6->S7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 10, + sidechainBlocks: 8, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 7, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was not yet committed, but sethead was called. In +// this test scenario the side chain reaches above the committed block. In this +// case we expect the chain to detect that it was fast syncing and delete +// everything from the new head, since we can just pick up fast syncing from +// there. +// +// The side chain could be left to be if the fork point was before the new head +// we are deleting to, but it would be exceedingly hard to detect that case and +// properly handle it, so we'll trade extra work in exchange for simpler code. +func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8 + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3->S4->S5->S6->S7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 10, + sidechainBlocks: 8, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 7, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a short canonical chain and a longer side chain, where a +// recent block was already committed to disk and then sethead was called. In this +// case we expect the canonical full chain to be rolled back to the committed block. +// All data above the sethead point should be deleted. In between the committed +// block and the requested head the data can remain as "fast sync" data to avoid +// having to redownload it. The side chain should be truncated to the head set. +// +// The side chain could be left to be if the fork point was before the new head +// we are deleting to, but it would be exceedingly hard to detect that case and +// properly handle it, so we'll trade extra work in exchange for simpler code. 
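+//
+// (Note that although the side chain below is longer than the canonical one,
+// it never becomes the head: the test driver generates canonical blocks with
+//
+//	b.SetDifficulty(big.NewInt(1000000))
+//
+// so the canonical chain stays heavier regardless of block count.)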
+func TestShortReorgedSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Frozen: none + // Commit: G, C4 + // Pivot : none + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3->S4->S5->S6->S7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 10, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 7, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a short canonical chain and a longer side chain, where +// the fast sync pivot point was already committed to disk and then sethead was +// called. In this case we expect the canonical full chain to be rolled back to +// the committed block. All data above the sethead point should be deleted. In +// between the committed block and the requested head the data can remain as +// "fast sync" data to avoid having to redownload it. The side chain should be +// truncated to the head set. +// +// The side chain could be left to be if the fork point was before the new head +// we are deleting to, but it would be exceedingly hard to detect that case and +// properly handle it, so we'll trade extra work in exchange for simpler code. +func TestShortReorgedFastSyncedSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3->S4->S5->S6->S7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 10, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 7, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a short canonical chain and a longer side chain, where +// the fast sync pivot point was not yet committed, but sethead was called. In +// this case we expect the chain to detect that it was fast syncing and delete +// everything from the new head, since we can just pick up fast syncing from +// there. +// +// The side chain could be left to be if the fork point was before the new head +// we are deleting to, but it would be exceedingly hard to detect that case and +// properly handle it, so we'll trade extra work in exchange for simpler code. 
+func TestShortReorgedFastSyncingSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3->S4->S5->S6->S7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 10, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 7, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks where a recent +// block - newer than the ancient limit - was already committed to disk and then +// sethead was called. In this case we expect the full chain to be rolled back +// to the committed block. Everything above the sethead point should be deleted. +// In between the committed block and the requested head the data can remain as +// "fast sync" data to avoid redownloading it. +func TestLongShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks where a recent +// block - older than the ancient limit - was already committed to disk and then +// sethead was called. In this case we expect the full chain to be rolled back +// to the committed block. Since the ancient limit was underflown, everything +// needs to be deleted onwards to avoid creating a gap. +func TestLongDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks where the fast +// sync pivot point - newer than the ancient limit - was already committed, after +// which sethead was called. In this case we expect the full chain to be rolled +// back to the committed block. Everything above the sethead point should be +// deleted. 
In between the committed block and the requested head the data can +// remain as "fast sync" data to avoid redownloading it. +func TestLongFastSyncedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks where the fast +// sync pivot point - older than the ancient limit - was already committed, after +// which sethead was called. In this case we expect the full chain to be rolled +// back to the committed block. Since the ancient limit was underflown, everything +// needs to be deleted onwards to avoid creating a gap. +func TestLongFastSyncedDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks where the fast +// sync pivot point - newer than the ancient limit - was not yet committed, but +// sethead was called. In this case we expect the chain to detect that it was fast +// syncing and delete everything from the new head, since we can just pick up fast +// syncing from there. +func TestLongFastSyncingShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // + // Frozen: + // G->C1->C2 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks where the fast +// sync pivot point - older than the ancient limit - was not yet committed, but +// sethead was called. 
In this case we expect the chain to detect that it was fast +// syncing and delete everything from the new head, since we can just pick up fast +// syncing from there. +func TestLongFastSyncingDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6 + // + // Expected in leveldb: none + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 7, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter side +// chain, where a recent block - newer than the ancient limit - was already committed +// to disk and then sethead was called. In this case we expect the canonical full +// chain to be rolled back to the committed block. Everything above the sethead point +// should be deleted. In between the committed block and the requested head the data +// can remain as "fast sync" data to avoid redownloading it. The side chain is nuked +// by the freezer. +func TestLongOldForkedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter side +// chain, where a recent block - older than the ancient limit - was already committed +// to disk and then sethead was called. In this case we expect the canonical full +// chain to be rolled back to the committed block. Since the ancient limit was +// underflown, everything needs to be deleted onwards to avoid creating a gap. The +// side chain is nuked by the freezer. 
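+//
+// (A worked example of the "deep" arithmetic used below: with 24 canonical
+// blocks and a freeze threshold of 16, the forced freeze cycle moves
+// 24-16 = 8 blocks - plus the genesis - into the freezer, so the ancient
+// limit sits at C8. A SetHead target of 6 lands below that limit, which is
+// why the freezer itself must be truncated instead of only deleting from
+// leveldb.)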
+func TestLongOldForkedDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was already committed to disk and then sethead was called. In this test scenario +// the side chain is below the committed block. In this case we expect the canonical +// full chain to be rolled back to the committed block. Everything above the +// sethead point should be deleted. In between the committed block and the +// requested head the data can remain as "fast sync" data to avoid redownloading +// it. The side chain is nuked by the freezer. +func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - older than the ancient limit - +// was already committed to disk and then sethead was called. In this test scenario +// the side chain is below the committed block. In this case we expect the canonical +// full chain to be rolled back to the committed block. Since the ancient limit was +// underflown, everything needs to be deleted onwards to avoid creating a gap. The +// side chain is nuked by the freezer. 
+func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6 + // + // Expected in leveldb: none + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was not yet committed, but sethead was called. In this test scenario the side +// chain is below the committed block. In this case we expect the chain to detect +// that it was fast syncing and delete everything from the new head, since we can +// just pick up fast syncing from there. The side chain is completely nuked by the +// freezer. +func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - older than the ancient limit - +// was not yet committed, but sethead was called. In this test scenario the side +// chain is below the committed block. In this case we expect the chain to detect +// that it was fast syncing and delete everything from the new head, since we can +// just pick up fast syncing from there. The side chain is completely nuked by the +// freezer. 
+func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6 + // + // Expected in leveldb: none + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 7, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where a recent block - newer than the ancient limit - was already +// committed to disk and then sethead was called. In this test scenario the side +// chain is above the committed block. In this case the freezer will delete the +// sidechain since it's dangling, reverting to TestLongShallowSetHead. +func TestLongNewerForkedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where a recent block - older than the ancient limit - was already +// committed to disk and then sethead was called. In this test scenario the side +// chain is above the committed block. In this case the freezer will delete the +// sidechain since it's dangling, reverting to TestLongDeepSetHead. 
+func TestLongNewerForkedDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was already committed to disk and then sethead was called. In this test scenario +// the side chain is above the committed block. In this case the freezer will delete +// the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead. +func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - older than the ancient limit - +// was already committed to disk and then sethead was called. In this test scenario +// the side chain is above the committed block. In this case the freezer will delete +// the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead. 
+func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongFastSyncingShallowSetHead.
+func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(6)
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6
+	//
+	// Expected head header    : C6
+	// Expected head fast block: C6
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       6,
+		expCanonicalBlocks: 6,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      6,
+		expHeadFastBlock:   6,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead.
+func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6 + // + // Expected in leveldb: none + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 7, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a longer side +// chain, where a recent block - newer than the ancient limit - was already committed +// to disk and then sethead was called. In this case the freezer will delete the +// sidechain since it's dangling, reverting to TestLongShallowSetHead. +func TestLongReorgedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a longer side +// chain, where a recent block - older than the ancient limit - was already committed +// to disk and then sethead was called. In this case the freezer will delete the +// sidechain since it's dangling, reverting to TestLongDeepSetHead. 
+func TestLongReorgedDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was already committed to disk and then sethead was called. In this case the +// freezer will delete the sidechain since it's dangling, reverting to +// TestLongFastSyncedShallowSetHead. +func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - older than the ancient limit - +// was already committed to disk and then sethead was called. In this case the +// freezer will delete the sidechain since it's dangling, reverting to +// TestLongFastSyncedDeepSetHead. 
+func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was not yet committed, but sethead was called. In this case we expect the +// chain to detect that it was fast syncing and delete everything from the new +// head, since we can just pick up fast syncing from there. The side chain is +// completely nuked by the freezer. +func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - older than the ancient limit - +// was not yet committed, but sethead was called. In this case we expect the +// chain to detect that it was fast syncing and delete everything from the new +// head, since we can just pick up fast syncing from there. The side chain is +// completely nuked by the freezer. 
+func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6 + // + // Expected in leveldb: none + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 7, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +func testSetHead(t *testing.T, tt *rewindTest) { + // It's hard to follow the test case, visualize the input + //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + //fmt.Println(tt.dump(false)) + + // Create a temporary persistent database + datadir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("Failed to create temporary datadir: %v", err) + } + os.RemoveAll(datadir) + + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") + if err != nil { + t.Fatalf("Failed to create persistent database: %v", err) + } + defer db.Close() + + // Initialize a fresh chain + var ( + genesis = MustCommitGenesis(db, new(genesisT.Genesis)) + engine = ethash.NewFullFaker() + ) + chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to create chain: %v", err) + } + // If sidechain blocks are needed, make a light chain and import it + var sideblocks types.Blocks + if tt.sidechainBlocks > 0 { + sideblocks, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) { + b.SetCoinbase(common.Address{0x01}) + }) + if _, err := chain.InsertChain(sideblocks); err != nil { + t.Fatalf("Failed to import side chain: %v", err) + } + } + canonblocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) { + b.SetCoinbase(common.Address{0x02}) + b.SetDifficulty(big.NewInt(1000000)) + }) + if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil { + t.Fatalf("Failed to import canonical chain start: %v", err) + } + if tt.commitBlock > 0 { + chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil) + } + if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil { + t.Fatalf("Failed to import canonical chain tail: %v", err) + } + // Manually dereference anything not committed to not have to work with 128+ tries + for _, block := range sideblocks { + chain.stateCache.TrieDB().Dereference(block.Root()) + } + for _, block := range canonblocks { + chain.stateCache.TrieDB().Dereference(block.Root()) + } + // Force run a freeze cycle + type freezer interface { + Freeze(threshold uint64) + Ancients() (uint64, error) + } + db.(freezer).Freeze(tt.freezeThreshold) + + // Set the simulated pivot block + if tt.pivotBlock != nil { + rawdb.WriteLastPivotNumber(db, *tt.pivotBlock) + } + // Set the head 
of the chain back to the requested number + chain.SetHead(tt.setheadBlock) + + // Iterate over all the remaining blocks and ensure there are no gaps + verifyNoGaps(t, chain, true, canonblocks) + verifyNoGaps(t, chain, false, sideblocks) + verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks) + verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks) + + if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader { + t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader) + } + if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock { + t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock) + } + if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock { + t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock) + } + if frozen, err := db.(freezer).Ancients(); err != nil { + t.Errorf("Failed to retrieve ancient count: %v\n", err) + } else if int(frozen) != tt.expFrozen { + t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen) + } +} + +// verifyNoGaps checks that there are no gaps after the initial set of blocks in +// the database and errors if found. +func verifyNoGaps(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks) { + t.Helper() + + var end uint64 + for i := uint64(0); i <= uint64(len(inserted)); i++ { + header := chain.GetHeaderByNumber(i) + if header == nil && end == 0 { + end = i + } + if header != nil && end > 0 { + if canonical { + t.Errorf("Canonical header gap between #%d-#%d", end, i-1) + } else { + t.Errorf("Sidechain header gap between #%d-#%d", end, i-1) + } + end = 0 // Reset for further gap detection + } + } + end = 0 + for i := uint64(0); i <= uint64(len(inserted)); i++ { + block := chain.GetBlockByNumber(i) + if block == nil && end == 0 { + end = i + } + if block != nil && end > 0 { + if canonical { + t.Errorf("Canonical block gap between #%d-#%d", end, i-1) + } else { + t.Errorf("Sidechain block gap between #%d-#%d", end, i-1) + } + end = 0 // Reset for further gap detection + } + } + end = 0 + for i := uint64(1); i <= uint64(len(inserted)); i++ { + receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()) + if receipts == nil && end == 0 { + end = i + } + if receipts != nil && end > 0 { + if canonical { + t.Errorf("Canonical receipt gap between #%d-#%d", end, i-1) + } else { + t.Errorf("Sidechain receipt gap between #%d-#%d", end, i-1) + } + end = 0 // Reset for further gap detection + } + } +} + +// verifyCutoff checks that there are no chain data available in the chain after +// the specified limit, but that it is available before. +func verifyCutoff(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks, head int) { + t.Helper() + + for i := 1; i <= len(inserted); i++ { + if i <= head { + if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header == nil { + if canonical { + t.Errorf("Canonical header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } else { + t.Errorf("Sidechain header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } + } + if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block == nil { + if canonical { + t.Errorf("Canonical block #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } else { + t.Errorf("Sidechain block #%2d [%x...] 
missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } + } + if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts == nil { + if canonical { + t.Errorf("Canonical receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } else { + t.Errorf("Sidechain receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } + } + } else { + if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header != nil { + if canonical { + t.Errorf("Canonical header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } else { + t.Errorf("Sidechain header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } + } + if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block != nil { + if canonical { + t.Errorf("Canonical block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } else { + t.Errorf("Sidechain block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } + } + if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts != nil { + if canonical { + t.Errorf("Canonical receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } else { + t.Errorf("Sidechain receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } + } + } + } +} + +// uint64ptr is a weird helper to allow 1-line constant pointer creation. +func uint64ptr(n uint64) *uint64 { + return &n +} diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 252f643f27..26ee56cab7 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -39,6 +39,7 @@ import ( "github.com/ethereum/go-ethereum/params/types/genesisT" "github.com/ethereum/go-ethereum/params/types/goethereum" "github.com/ethereum/go-ethereum/params/vars" + "github.com/ethereum/go-ethereum/trie" ) // So we can deterministically seed different blockchains @@ -684,12 +685,12 @@ func TestFastVsFullChains(t *testing.T) { } if fblock, arblock, anblock := fast.GetBlockByHash(hash), archive.GetBlockByHash(hash), ancient.GetBlockByHash(hash); fblock.Hash() != arblock.Hash() || anblock.Hash() != arblock.Hash() { t.Errorf("block #%d [%x]: block mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock, anblock, arblock) - } else if types.DeriveSha(fblock.Transactions()) != types.DeriveSha(arblock.Transactions()) || types.DeriveSha(anblock.Transactions()) != types.DeriveSha(arblock.Transactions()) { + } else if types.DeriveSha(fblock.Transactions(), new(trie.Trie)) != types.DeriveSha(arblock.Transactions(), new(trie.Trie)) || types.DeriveSha(anblock.Transactions(), new(trie.Trie)) != types.DeriveSha(arblock.Transactions(), new(trie.Trie)) { t.Errorf("block #%d [%x]: transactions mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Transactions(), anblock.Transactions(), arblock.Transactions()) } else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) { t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock, arblock.Uncles()) } - if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, 
hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) { + if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts, new(trie.Trie)) != types.DeriveSha(areceipts, new(trie.Trie)) { t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts) } } @@ -734,12 +735,12 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { return db, func() { os.RemoveAll(dir) } } // Configure a subchain to roll back - remove := []common.Hash{} - for _, block := range blocks[height/2:] { - remove = append(remove, block.Hash()) - } + remove := blocks[height/2].NumberU64() + // Create a small assertion method to check the three heads assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) { + t.Helper() + if num := chain.CurrentBlock().NumberU64(); num != block { t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block) } @@ -753,14 +754,18 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { // Import the chain as an archive node and ensure all pointers are updated archiveDb, delfn := makeDb() defer delfn() - archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) + + archiveCaching := *defaultCacheConfig + archiveCaching.TrieDirtyDisabled = true + + archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) if n, err := archive.InsertChain(blocks); err != nil { t.Fatalf("failed to process block %d: %v", n, err) } defer archive.Stop() assert(t, "archive", archive, height, height, height) - archive.Rollback(remove) + archive.SetHead(remove - 1) assert(t, "archive", archive, height/2, height/2, height/2) // Import the chain as a non-archive node and ensure all pointers are updated @@ -780,7 +785,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { t.Fatalf("failed to insert receipt %d: %v", n, err) } assert(t, "fast", fast, height, height, 0) - fast.Rollback(remove) + fast.SetHead(remove - 1) assert(t, "fast", fast, height/2, height/2, 0) // Import the chain as a ancient-first node and ensure all pointers are updated @@ -796,12 +801,12 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { t.Fatalf("failed to insert receipt %d: %v", n, err) } assert(t, "ancient", ancient, height, height, 0) - ancient.Rollback(remove) - assert(t, "ancient", ancient, height/2, height/2, 0) - if frozen, err := ancientDb.Ancients(); err != nil || frozen != height/2+1 { - t.Fatalf("failed to truncate ancient store, want %v, have %v", height/2+1, frozen) - } + ancient.SetHead(remove - 1) + assert(t, "ancient", ancient, 0, 0, 0) + if frozen, err := ancientDb.Ancients(); err != nil || frozen != 1 { + t.Fatalf("failed to truncate ancient store, want %v, have %v", 1, frozen) + } // Import the chain as a light node and ensure all pointers are updated lightDb, delfn := makeDb() defer delfn() @@ -812,7 +817,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { defer light.Stop() 
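// Example (sketch): the migration pattern these hunks apply, from the
// hash-based Rollback API to the number-based SetHead API, reusing the
// blocks/height/chain names from the surrounding tests:
//
//	// Before: gather every hash above the cut point and roll them back.
//	remove := []common.Hash{}
//	for _, block := range blocks[height/2:] {
//		remove = append(remove, block.Hash())
//	}
//	chain.Rollback(remove)
//
//	// After: rewind to the block just below the first one to drop.
//	chain.SetHead(blocks[height/2].NumberU64() - 1)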
assert(t, "light", light, height, 0, 0) - light.Rollback(remove) + light.SetHead(remove - 1) assert(t, "light", light, height/2, 0, 0) } @@ -1592,6 +1597,7 @@ func TestBlockchainRecovery(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) @@ -1609,6 +1615,7 @@ func TestBlockchainRecovery(t *testing.T) { if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil { t.Fatalf("failed to insert receipt %d: %v", n, err) } + rawdb.WriteLastPivotNumber(ancientDb, blocks[len(blocks)-1].NumberU64()) // Force fast sync behavior ancient.Stop() // Destroy head fast block manually @@ -1919,11 +1926,9 @@ func testInsertKnownChainData(t *testing.T, typ string) { asserter(t, blocks[len(blocks)-1]) // Import a long canonical chain with some known data as prefix. - var rollback []common.Hash - for i := len(blocks) / 2; i < len(blocks); i++ { - rollback = append(rollback, blocks[i].Hash()) - } - chain.Rollback(rollback) + rollback := blocks[len(blocks)/2].NumberU64() + + chain.SetHead(rollback - 1) if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil { t.Fatalf("failed to insert chain data: %v", err) } @@ -1943,11 +1948,7 @@ func testInsertKnownChainData(t *testing.T, typ string) { asserter(t, blocks3[len(blocks3)-1]) // Rollback the heavier chain and re-insert the longer chain again - for i := 0; i < len(blocks3); i++ { - rollback = append(rollback, blocks3[i].Hash()) - } - chain.Rollback(rollback) - + chain.SetHead(rollback - 1) if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil { t.Fatalf("failed to insert chain data: %v", err) } diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index fee924aea4..f285d5b505 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -162,7 +162,7 @@ func TestCreation(t *testing.T) { {9573001, ID{Hash: checksumToBytes(0x7ba22882), Next: 10500839}}, {10500838, ID{Hash: checksumToBytes(0x7ba22882), Next: 10500839}}, {10500839, ID{Hash: checksumToBytes(0x9007bfcc), Next: 100000000}}, - {10500840, ID{Hash: checksumToBytes(0x9007bfcc), Next: 100000000}}, + {100000000, ID{Hash: checksumToBytes(0x91a12654), Next: 0}}, }, }, { @@ -178,8 +178,8 @@ func TestCreation(t *testing.T) { {1705549, ID{Hash: checksumToBytes(0x8f3698e0), Next: 2200013}}, {1705550, ID{Hash: checksumToBytes(0x8f3698e0), Next: 2200013}}, {2200012, ID{Hash: checksumToBytes(0x8f3698e0), Next: 2200013}}, - {2200013, ID{Hash: checksumToBytes(0x6f402821), Next: 100000000}}, - {2200014, ID{Hash: checksumToBytes(0x6f402821), Next: 100000000}}, + {2200013, ID{Hash: checksumToBytes(0x6f402821), Next: 0}}, + {2200014, ID{Hash: checksumToBytes(0x6f402821), Next: 0}}, }, }, { @@ -197,8 +197,9 @@ func TestCreation(t *testing.T) { {301243, ID{Hash: checksumToBytes(0x604f6ee1), Next: 999983}}, {301244, ID{Hash: checksumToBytes(0x604f6ee1), Next: 999983}}, {999982, ID{Hash: checksumToBytes(0x604f6ee1), Next: 999983}}, - {999983, ID{Hash: checksumToBytes(0xf42f5539), Next: 0}}, - {999984, ID{Hash: checksumToBytes(0xf42f5539), Next: 0}}, + {999983, ID{Hash: checksumToBytes(0xf42f5539), Next: 100000000}}, + {999984, ID{Hash: checksumToBytes(0xf42f5539), Next: 100000000}}, + {100000000, ID{Hash: checksumToBytes(0x06b793f5), Next: 0}}, }, }, } @@ -321,7 +322,7 @@ func TestGatherForks(t 
*testing.T) { { "classic", params.ClassicChainConfig, - []uint64{1150000, 2500000, 3000000, 5000000, 5900000, 8772000, 9573000, 10500839}, + []uint64{1150000, 2500000, 3000000, 5000000, 5900000, 8772000, 9573000, 10500839, 100000000}, }, { "mainnet", @@ -331,7 +332,7 @@ func TestGatherForks(t *testing.T) { { "mordor", params.MordorChainConfig, - []uint64{301_243, 999_983}, + []uint64{301_243, 999_983, 100000000}, }, { "kotti", diff --git a/core/genesis.go b/core/genesis.go index 7c01dfb350..5472236bb3 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/params/types/ctypes" "github.com/ethereum/go-ethereum/params/types/genesisT" "github.com/ethereum/go-ethereum/params/vars" + "github.com/ethereum/go-ethereum/trie" ) // SetupGenesisBlock writes or updates the genesis block in db. @@ -199,7 +200,7 @@ func GenesisToBlock(g *genesisT.Genesis, db ethdb.Database) *types.Block { statedb.Commit(false) statedb.Database().TrieDB().Commit(root, true, nil) - return types.NewBlock(head, nil, nil, nil) + return types.NewBlock(head, nil, nil, nil, new(trie.Trie)) } // CommitGenesis writes the block and state of a genesis specification to the database. diff --git a/core/headerchain.go b/core/headerchain.go index a17d163c99..77a0506c2c 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -494,8 +494,10 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) { type ( // UpdateHeadBlocksCallback is a callback function that is called by SetHead - // before head header is updated. - UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) + // before head header is updated. The method returns the block number the + // head was actually updated to (e.g. if that block's state is missing) and a + // flag whether SetHead must keep rewinding to it forcefully (ancient limits exceeded). + UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (uint64, bool) // DeleteBlockContentCallback is a callback function that is called by SetHead // before each header is deleted. @@ -508,9 +510,10 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d var ( parentHash common.Hash batch = hc.chainDb.NewBatch() + origin = true ) for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() { - hash, num := hdr.Hash(), hdr.Number.Uint64() + num := hdr.Number.Uint64() // Rewind block chain to new head. parent := hc.GetHeader(hdr.ParentHash, num-1) @@ -518,16 +521,21 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d parent = hc.genesisHeader } parentHash = hdr.ParentHash + // Notably, since geth has the possibility for setting the head to a low // height which is even lower than ancient head. // In order to ensure that the head is always no higher than the data in - // the database(ancient store or active store), we need to update head + // the database (ancient store or active store), we need to update head // first then remove the relative data from the database. // // Update head first(head fast block, head full block) before deleting the data. markerBatch := hc.chainDb.NewBatch() if updateFn != nil { - updateFn(markerBatch, parent) + newHead, force := updateFn(markerBatch, parent) + if force && newHead < head { + log.Warn("Force rewinding till ancient limit", "head", newHead) + head = newHead + } } // Update head header then.
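// Example (sketch): a callback conforming to the new UpdateHeadBlocksCallback
// signature; the body here is illustrative only, not the real callback wired
// up in blockchain.go:
//
//	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
//		newHead := header.Number.Uint64() // number the head was actually moved to
//		force := false                    // set when the ancient limit forces a deeper rewind
//		return newHead, force
//	}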
rawdb.WriteHeadHeaderHash(markerBatch, parentHash) @@ -538,14 +546,34 @@ hc.currentHeaderHash = parentHash headHeaderGauge.Update(parent.Number.Int64()) - // Remove the relative data from the database. - if delFn != nil { - delFn(batch, hash, num) + // If this is the first iteration, wipe any leftover data upwards too so + // we don't end up with dangling gaps in the database + var nums []uint64 + if origin { + for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ { + nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path + } + origin = false + } + nums = append(nums, num) + + // Remove the related data from the database on all sidechains + for _, num := range nums { + // Gather all the side fork hashes + hashes := rawdb.ReadAllHashes(hc.chainDb, num) + if len(hashes) == 0 { + // No hashes in the database whatsoever, probably frozen already + hashes = append(hashes, hdr.Hash()) + } + for _, hash := range hashes { + if delFn != nil { + delFn(batch, hash, num) + } + rawdb.DeleteHeader(batch, hash, num) + rawdb.DeleteTd(batch, hash, num) + } + rawdb.DeleteCanonicalHash(batch, num) } - // Rewind header chain to new head. - rawdb.DeleteHeader(batch, hash, num) - rawdb.DeleteTd(batch, hash, num) - rawdb.DeleteCanonicalHash(batch, num) } // Flush all accumulated deletions. if err := batch.Write(); err != nil { diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index b474479a39..9ff607ad91 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -187,6 +187,32 @@ func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) { } } +// ReadLastPivotNumber retrieves the number of the last pivot block. If the node +// is fully synced, the last pivot will always be nil. +func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 { + data, _ := db.Get(lastPivotKey) + if len(data) == 0 { + return nil + } + var pivot uint64 + if err := rlp.DecodeBytes(data, &pivot); err != nil { + log.Error("Invalid pivot block number in database", "err", err) + return nil + } + return &pivot +} + +// WriteLastPivotNumber stores the number of the last pivot block. +func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) { + enc, err := rlp.EncodeToBytes(pivot) + if err != nil { + log.Crit("Failed to encode pivot block number", "err", err) + } + if err := db.Put(lastPivotKey, enc); err != nil { + log.Crit("Failed to store pivot block number", "err", err) + } +} + // ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow // reporting correct numbers across restarts.
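// Example (sketch): round-tripping the pivot number through the new accessors,
// assuming an ephemeral database from NewMemoryDatabase():
//
//	db := NewMemoryDatabase()
//	WriteLastPivotNumber(db, 4)
//	if pivot := ReadLastPivotNumber(db); pivot == nil || *pivot != 4 {
//		// unreachable after the write above; a fully synced node simply stores no pivot
//	}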
func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 { diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 3eba2a3b4e..074c24d8fe 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -84,7 +84,7 @@ func TestBodyStorage(t *testing.T) { WriteBody(db, hash, 0, body) if entry := ReadBody(db, hash, 0); entry == nil { t.Fatalf("Stored body not found") - } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { + } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body) } if entry := ReadBodyRLP(db, hash, 0); entry == nil { @@ -138,7 +138,7 @@ func TestBlockStorage(t *testing.T) { } if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil { t.Fatalf("Stored body not found") - } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { + } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(block.Transactions(), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body()) } // Delete the block and verify the execution diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go index 49d00f9900..87338c62bf 100644 --- a/core/rawdb/accessors_indexes_test.go +++ b/core/rawdb/accessors_indexes_test.go @@ -18,6 +18,7 @@ package rawdb import ( "bytes" + "hash" "math/big" "testing" @@ -26,8 +27,33 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/crypto/sha3" ) +// testHasher is the helper tool for transaction/receipt list hashing. +// The original hasher is the trie, but to avoid an import cycle the +// tests use this hasher instead. +type testHasher struct { + hasher hash.Hash +} + +func newHasher() *testHasher { + return &testHasher{hasher: sha3.NewLegacyKeccak256()} +} + +func (h *testHasher) Reset() { + h.hasher.Reset() +} + +func (h *testHasher) Update(key, val []byte) { + h.hasher.Write(key) + h.hasher.Write(val) +} + +func (h *testHasher) Hash() common.Hash { + return common.BytesToHash(h.hasher.Sum(nil)) +} + // Tests that positional lookup metadata can be stored and retrieved.
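// Note: types.DeriveSha now takes its hasher explicitly; any value exposing
// the Reset/Update/Hash methods implemented above will do. These tests pass
// newHasher(), while production code passes new(trie.Trie) to obtain the
// canonical ordered-trie root:
//
//	root := types.DeriveSha(types.Transactions(txs), newHasher())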
func TestLookupStorage(t *testing.T) { tests := []struct { @@ -73,7 +99,7 @@ func TestLookupStorage(t *testing.T) { tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33}) txs := []*types.Transaction{tx1, tx2, tx3} - block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil) + block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newHasher()) // Check that no transactions entries are in a pristine database for i, tx := range txs { diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index 9f517190bc..6e540df010 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -80,20 +80,3 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg ctypes.Chai log.Crit("Failed to store chain config", "err", err) } } - -// ReadPreimage retrieves a single preimage of the provided hash. -func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte { - data, _ := db.Get(preimageKey(hash)) - return data -} - -// WritePreimages writes the provided set of preimages to the database. -func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) { - for hash, preimage := range preimages { - if err := db.Put(preimageKey(hash), preimage); err != nil { - log.Crit("Failed to store trie preimage", "err", err) - } - } - preimageCounter.Inc(int64(len(preimages))) - preimageHitCounter.Inc(int64(len(preimages))) -} diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go new file mode 100644 index 0000000000..6112de03ad --- /dev/null +++ b/core/rawdb/accessors_state.go @@ -0,0 +1,96 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package rawdb + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" +) + +// ReadPreimage retrieves a single preimage of the provided hash. +func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte { + data, _ := db.Get(preimageKey(hash)) + return data +} + +// WritePreimages writes the provided set of preimages to the database. +func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) { + for hash, preimage := range preimages { + if err := db.Put(preimageKey(hash), preimage); err != nil { + log.Crit("Failed to store trie preimage", "err", err) + } + } + preimageCounter.Inc(int64(len(preimages))) + preimageHitCounter.Inc(int64(len(preimages))) +} + +// ReadCode retrieves the contract code of the provided code hash. +func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte { + // Try the legacy code scheme first and, failing that, the current + // scheme, since most of the code will be found with the legacy scheme.
+ // + // todo(rjl493456442) change the order when we forcibly upgrade the code + // scheme with snapshot. + data, _ := db.Get(hash[:]) + if len(data) != 0 { + return data + } + return ReadCodeWithPrefix(db, hash) +} + +// ReadCodeWithPrefix retrieves the contract code of the provided code hash. +// The main difference between this function and ReadCode is this function +// will only check existence with the latest scheme (with prefix). +func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte { + data, _ := db.Get(codeKey(hash)) + return data +} + +// WriteCode writes the provided contract code to the database. +func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) { + if err := db.Put(codeKey(hash), code); err != nil { + log.Crit("Failed to store contract code", "err", err) + } +} + +// DeleteCode deletes the specified contract code from the database. +func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) { + if err := db.Delete(codeKey(hash)); err != nil { + log.Crit("Failed to delete contract code", "err", err) + } +} + +// ReadTrieNode retrieves the trie node of the provided hash. +func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte { + data, _ := db.Get(hash.Bytes()) + return data +} + +// WriteTrieNode writes the provided trie node to the database. +func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) { + if err := db.Put(hash.Bytes(), node); err != nil { + log.Crit("Failed to store trie node", "err", err) + } +} + +// DeleteTrieNode deletes the specified trie node from the database. +func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) { + if err := db.Delete(hash.Bytes()); err != nil { + log.Crit("Failed to delete trie node", "err", err) + } +} diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go index c99a97c5f8..c635cd2f12 100644 --- a/core/rawdb/chain_iterator_test.go +++ b/core/rawdb/chain_iterator_test.go @@ -34,11 +34,11 @@ func TestChainIterator(t *testing.T) { var txs []*types.Transaction for i := uint64(0); i <= 10; i++ { if i == 0 { - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, nil, nil, nil) // Empty genesis block + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, nil, nil, nil, newHasher()) // Empty genesis block } else { tx := types.NewTransaction(i, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11}) txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) } WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 1a6c34fcaa..b252c7ed4d 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -22,6 +22,7 @@ import ( "fmt" "os" "strings" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -54,6 +55,22 @@ func (frdb *freezerdb) Close() error { return nil } +// Freeze is a helper method used for external testing to trigger and block until +// a freeze cycle completes, without having to sleep for a minute to trigger the +// automatic background run.
+func (frdb *freezerdb) Freeze(threshold uint64) { + // Set the freezer threshold to a temporary value + defer func(old uint64) { + atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, old) + }(atomic.LoadUint64(&frdb.AncientStore.(*freezer).threshold)) + atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, threshold) + + // Trigger a freeze cycle and block until it's done + trigger := make(chan struct{}, 1) + frdb.AncientStore.(*freezer).trigger <- trigger + <-trigger +} + // nofreezedb is a database wrapper that disables freezer data retrievals. type nofreezedb struct { ethdb.KeyValueStore @@ -186,7 +203,7 @@ Please set --ancient.rpc to the correct path, and/or review the remote freezer's } } // Freezer is consistent with the key-value database, permit combining the two - go freezeRemote(db, frdb, frdb.quit) + go freezeRemote(db, frdb, frdb.threshold, frdb.quit, frdb.trigger) return &freezerdb{ KeyValueStore: db, @@ -336,6 +353,7 @@ func InspectDatabase(db ethdb.Database) error { numHashPairing common.StorageSize hashNumPairing common.StorageSize trieSize common.StorageSize + codeSize common.StorageSize txlookupSize common.StorageSize accountSnapSize common.StorageSize storageSnapSize common.StorageSize @@ -394,6 +412,8 @@ func InspectDatabase(db ethdb.Database) error { chtTrieNodes += size case bytes.HasPrefix(key, []byte("blt-")) && len(key) == 4+common.HashLength: bloomTrieNodes += size + case bytes.HasPrefix(key, codePrefix) && len(key) == len(codePrefix)+common.HashLength: + codeSize += size case len(key) == common.HashLength: trieSize += size default: @@ -433,6 +453,7 @@ func InspectDatabase(db ethdb.Database) error { {"Key-Value store", "Block hash->number", hashNumPairing.String()}, {"Key-Value store", "Transaction index", txlookupSize.String()}, {"Key-Value store", "Bloombit index", bloomBitsSize.String()}, + {"Key-Value store", "Contract codes", codeSize.String()}, {"Key-Value store", "Trie nodes", trieSize.String()}, {"Key-Value store", "Trie preimages", preimageSize.String()}, {"Key-Value store", "Account snapshot", accountSnapSize.String()}, diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 31eaa444c8..1da5694264 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -70,12 +70,16 @@ type freezer struct { // WARNING: The `frozen` field is accessed atomically. On 32 bit platforms, only // 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned, // so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG). 
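// Example (sketch): tests drive a deterministic freeze cycle through the
// blocking Freeze helper above instead of waiting on the background timer;
// testSetHead exercises it through a local interface assertion:
//
//	type freezer interface {
//		Freeze(threshold uint64)
//		Ancients() (uint64, error)
//	}
//	db.(freezer).Freeze(16) // returns once the cycle has completed
//	frozen, _ := db.(freezer).Ancients()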
- frozen uint64 // Number of blocks already frozen + frozen uint64 // Number of blocks already frozen + threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests) tables map[string]*freezerTable // Data tables for storing everything instanceLock fileutil.Releaser // File-system lock to prevent double opens - quit chan struct{} - closeOnce sync.Once + + trigger chan chan struct{} // Manual blocking freeze trigger, test determinism + + quit chan struct{} + closeOnce sync.Once } // newFreezer creates a chain freezer that moves ancient chain data into @@ -102,8 +106,10 @@ func newFreezer(datadir string, namespace string) (*freezer, error) { } // Open all the supported data tables freezer := &freezer{ + threshold: vars.FullImmutabilityThreshold, tables: make(map[string]*freezerTable), instanceLock: lock, + trigger: make(chan chan struct{}), quit: make(chan struct{}), } for name, disableSnappy := range freezerNoSnappy { @@ -261,7 +267,10 @@ func (f *freezer) Sync() error { func (f *freezer) freeze(db ethdb.KeyValueStore) { nfdb := &nofreezedb{KeyValueStore: db} - backoff := false + var ( + backoff bool + triggered chan struct{} // Used in tests + ) for { select { case <-f.quit: return default: } if backoff { + // If we were doing a manual trigger, notify it + if triggered != nil { + triggered <- struct{}{} + triggered = nil + } select { case <-time.NewTimer(freezerRecheckInterval).C: backoff = false + case triggered = <-f.trigger: + backoff = false case <-f.quit: return } } @@ -284,19 +300,22 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) { backoff = true continue } number := ReadHeaderNumber(nfdb, hash) + threshold := atomic.LoadUint64(&f.threshold) + switch { case number == nil: log.Error("Current full block number unavailable", "hash", hash) backoff = true continue - case *number < vars.FullImmutabilityThreshold: - log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", vars.FullImmutabilityThreshold) + case *number < threshold: + log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold) backoff = true continue - case *number-vars.FullImmutabilityThreshold <= f.frozen: + case *number-threshold <= f.frozen: log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen) backoff = true continue @@ -308,7 +327,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) { continue } // Seems we have data ready to be frozen, process in usable batches - limit := *number - vars.FullImmutabilityThreshold + limit := *number - threshold if limit-f.frozen > freezerBatchLimit { limit = f.frozen + freezerBatchLimit } @@ -317,7 +336,7 @@ first = f.frozen ancients = make([]common.Hash, 0, limit-f.frozen) ) - for f.frozen < limit { + for f.frozen <= limit { // Retrieves all the components of the canonical block hash := ReadCanonicalHash(nfdb, f.frozen) if hash == (common.Hash{}) { @@ -368,11 +387,15 @@ log.Crit("Failed to delete frozen canonical blocks", "err", err) } batch.Reset() - // Wipe out side chain also.
+ + // Wipe out side chains also and track dangling side chains + var dangling []common.Hash + for number := first; number < f.frozen; number++ { // Always keep the genesis block in active database if number != 0 { - for _, hash := range ReadAllHashes(db, number) { + dangling = ReadAllHashes(db, number) + for _, hash := range dangling { + log.Trace("Deleting side chain", "number", number, "hash", hash) DeleteBlock(batch, hash, number) } } @@ -380,6 +403,41 @@ if err := batch.Write(); err != nil { log.Crit("Failed to delete frozen side blocks", "err", err) } + batch.Reset() + + // Step into the future and delete any dangling side chains + if f.frozen > 0 { + tip := f.frozen + for len(dangling) > 0 { + drop := make(map[common.Hash]struct{}) + for _, hash := range dangling { + log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash) + drop[hash] = struct{}{} + } + children := ReadAllHashes(db, tip) + for i := 0; i < len(children); i++ { + // Dig up the child and ensure it's dangling + child := ReadHeader(nfdb, children[i], tip) + if child == nil { + log.Error("Missing dangling header", "number", tip, "hash", children[i]) + continue + } + if _, ok := drop[child.ParentHash]; !ok { + children = append(children[:i], children[i+1:]...) + i-- + continue + } + // Delete all block data associated with the child + log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash) + DeleteBlock(batch, children[i], tip) + } + dangling = children + tip++ + } + if err := batch.Write(); err != nil { + log.Crit("Failed to delete dangling side blocks", "err", err) + } + } // Log something friendly for the user context := []interface{}{ "blocks", f.frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen - 1, diff --git a/core/rawdb/freezer_remote_client.go b/core/rawdb/freezer_remote_client.go index e990fa6517..d0ad602b99 100644 --- a/core/rawdb/freezer_remote_client.go +++ b/core/rawdb/freezer_remote_client.go @@ -1,6 +1,7 @@ package rawdb import ( + "sync" "time" "github.com/ethereum/go-ethereum/common" @@ -14,8 +15,11 @@ import ( // The struct's methods delegate the business logic to an external server // that is responsible for managing an actual ancient store. type FreezerRemoteClient struct { - client *rpc.Client - quit chan struct{} + client *rpc.Client + quit chan struct{} + threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests) + trigger chan chan struct{} // Manual blocking freeze trigger, test determinism + closeOnce sync.Once } const ( @@ -36,7 +40,10 @@ func newFreezerRemoteClient(endpoint string) (*FreezerRemoteClient, error) { return nil, err } return &FreezerRemoteClient{ - client: client, + client: client, + threshold: vars.FullImmutabilityThreshold, + quit: make(chan struct{}), + trigger: make(chan chan struct{}), }, nil } @@ -110,10 +117,13 @@ func (api *FreezerRemoteClient) Sync() error { // to exist unmodified and untouched by the remote freezer client, which demands // a slightly different signature, and uses the freezer.Ancients() method instead // of direct access to the atomic freezer.frozen field.
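// Note: both the local and the remote freeze loop derive their work window
// identically. A condensed sketch, with head being the current full block
// number and frozen the current ancient count:
//
//	if head < threshold || head-threshold <= frozen {
//		// nothing old enough to freeze yet; back off
//	}
//	limit := head - threshold
//	if limit-frozen > freezerBatchLimit {
//		limit = frozen + freezerBatchLimit // move at most one batch per cycle
//	}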
-func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan struct{}) { +func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, threshold uint64, quitChan chan struct{}, triggerChanChan chan chan struct{}) { nfdb := &nofreezedb{KeyValueStore: db} - backoff := false + var ( + backoff bool + triggered chan struct{} // Used in tests + ) for { select { case <-quitChan: @@ -122,14 +132,20 @@ func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan st default: } if backoff { + // If we were doing a manual trigger, notify it + if triggered != nil { + triggered <- struct{}{} + triggered = nil + } select { case <-time.NewTimer(freezerRecheckInterval).C: backoff = false + case triggered = <-triggerChanChan: + backoff = false case <-quitChan: return } } - // Retrieve the freezing threshold. hash := ReadHeadBlockHash(nfdb) if hash == (common.Hash{}) { @@ -137,25 +153,25 @@ func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan st backoff = true continue } - numFrozen, err := f.Ancients() if err != nil { log.Crit("ancient db freeze", "error", err) } - number := ReadHeaderNumber(nfdb, hash) + // threshold := atomic.LoadUint64(&f.threshold) + switch { case number == nil: log.Error("Current full block number unavailable", "hash", hash) backoff = true continue - case *number < vars.FullImmutabilityThreshold: - log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", vars.FullImmutabilityThreshold) + case *number < threshold: + log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold) backoff = true continue - case *number-vars.FullImmutabilityThreshold <= numFrozen: + case *number-threshold <= numFrozen: log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", numFrozen) backoff = true continue @@ -167,7 +183,7 @@ func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan st continue } // Seems we have data ready to be frozen, process in usable batches - limit := *number - vars.FullImmutabilityThreshold + limit := *number - threshold if limit-numFrozen > freezerBatchLimit { limit = numFrozen + freezerBatchLimit } @@ -176,7 +192,7 @@ func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan st first = numFrozen ancients = make([]common.Hash, 0, limit-numFrozen) ) - for numFrozen < limit { + for numFrozen <= limit { // Retrieves all the components of the canonical block hash := ReadCanonicalHash(nfdb, numFrozen) if hash == (common.Hash{}) { @@ -208,7 +224,6 @@ func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan st if err := f.AppendAncient(numFrozen, hash[:], header, body, receipts, td); err != nil { break } - numFrozen++ // Manually increment numFrozen (save a call) ancients = append(ancients, hash) } // Batch of blocks have been frozen, flush them before wiping from leveldb @@ -228,11 +243,15 @@ func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan st log.Crit("Failed to delete frozen canonical blocks", "err", err) } batch.Reset() - // Wipe out side chain also. 
+ + // Wipe out side chains also and track dangling side chains + var dangling []common.Hash + for number := first; number < numFrozen; number++ { // Always keep the genesis block in active database if number != 0 { - for _, hash := range ReadAllHashes(db, number) { + dangling = ReadAllHashes(db, number) + for _, hash := range dangling { + log.Trace("Deleting side chain", "number", number, "hash", hash) DeleteBlock(batch, hash, number) } } @@ -240,6 +259,41 @@ if err := batch.Write(); err != nil { log.Crit("Failed to delete frozen side blocks", "err", err) } + batch.Reset() + + // Step into the future and delete any dangling side chains + if numFrozen > 0 { + tip := numFrozen + for len(dangling) > 0 { + drop := make(map[common.Hash]struct{}) + for _, hash := range dangling { + log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash) + drop[hash] = struct{}{} + } + children := ReadAllHashes(db, tip) + for i := 0; i < len(children); i++ { + // Dig up the child and ensure it's dangling + child := ReadHeader(nfdb, children[i], tip) + if child == nil { + log.Error("Missing dangling header", "number", tip, "hash", children[i]) + continue + } + if _, ok := drop[child.ParentHash]; !ok { + children = append(children[:i], children[i+1:]...) + i-- + continue + } + // Delete all block data associated with the child + log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash) + DeleteBlock(batch, children[i], tip) + } + dangling = children + tip++ + } + if err := batch.Write(); err != nil { + log.Crit("Failed to delete dangling side blocks", "err", err) + } + } // Log something friendly for the user context := []interface{}{ "blocks", numFrozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", numFrozen - 1, diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index e11a27430f..b9d8a274a8 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -330,7 +330,8 @@ func (t *freezerTable) truncate(items uint64) error { defer t.lock.Unlock() // If our item count is correct, don't do anything - if atomic.LoadUint64(&t.items) <= items { + existing := atomic.LoadUint64(&t.items) + if existing <= items { return nil } // We need to truncate, save the old size for metrics tracking @@ -339,7 +340,11 @@ return err } // Something's out of sync, truncate the table's offset index - t.logger.Warn("Truncating freezer table", "items", t.items, "limit", items) + log := t.logger.Debug + if existing > items+1 { + log = t.logger.Warn // Only warn loudly if we delete multiple items + } + log("Truncating freezer table", "items", existing, "limit", items) if err := truncateFreezerFile(t.index, int64(items+1)*indexEntrySize); err != nil { return err } diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index b0a7055cfb..03856de1f8 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -18,6 +18,7 @@ package rawdb import ( + "bytes" + "encoding/binary" "github.com/ethereum/go-ethereum/common" @@ -38,6 +39,9 @@ var ( // headFastBlockKey tracks the latest known incomplete block's hash during fast sync. headFastBlockKey = []byte("LastFast") + // lastPivotKey tracks the last pivot block used by fast sync (to re-enable on sethead). + lastPivotKey = []byte("LastPivot") + // fastTrieProgressKey tracks the number of trie entries imported during fast sync.
fastTrieProgressKey = []byte("TrieSync") @@ -66,6 +70,7 @@ var ( bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value + codePrefix = []byte("c") // codePrefix + code hash -> account code preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage ConfigPrefix = []byte("ethereum-config-") // config prefix for the db @@ -220,6 +225,20 @@ func preimageKey(hash common.Hash) []byte { return append(preimagePrefix, hash.Bytes()...) } +// codeKey = codePrefix + hash +func codeKey(hash common.Hash) []byte { + return append(codePrefix, hash.Bytes()...) +} + +// IsCodeKey reports whether the given byte slice is the key of contract code +// and, if so, returns the raw code hash as well. +func IsCodeKey(key []byte) (bool, []byte) { + if bytes.HasPrefix(key, codePrefix) && len(key) == common.HashLength+len(codePrefix) { + return true, key[len(codePrefix):] + } + return false, nil +} + // ConfigKey = ConfigPrefix + hash func ConfigKey(hash common.Hash) []byte { return append(ConfigPrefix, hash.Bytes()...) diff --git a/core/state/database.go b/core/state/database.go index 7bcec6d003..a9342f5179 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -17,9 +17,12 @@ package state import ( + "errors" "fmt" + "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie" lru "github.com/hashicorp/golang-lru" @@ -28,6 +31,9 @@ import ( const ( // Number of codehash->size associations to keep. codeSizeCacheSize = 100000 + + // Cache size granted for caching clean code. + codeCacheSize = 64 * 1024 * 1024 ) // Database wraps access to tries and contract code. @@ -111,12 +117,14 @@ func NewDatabaseWithCache(db ethdb.Database, cache int, journal string) Database return &cachingDB{ db: trie.NewDatabaseWithCache(db, cache, journal), codeSizeCache: csc, + codeCache: fastcache.New(codeCacheSize), } } type cachingDB struct { db *trie.Database codeSizeCache *lru.Cache + codeCache *fastcache.Cache } // OpenTrie opens the main account trie at a specific root hash. @@ -141,11 +149,32 @@ func (db *cachingDB) CopyTrie(t Trie) Trie { // ContractCode retrieves a particular contract's code. func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) { - code, err := db.db.Node(codeHash) - if err == nil { + if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 { + return code, nil + } + code := rawdb.ReadCode(db.db.DiskDB(), codeHash) + if len(code) > 0 { + db.codeCache.Set(codeHash.Bytes(), code) + db.codeSizeCache.Add(codeHash, len(code)) + return code, nil + } + return nil, errors.New("not found") +} + +// ContractCodeWithPrefix retrieves a particular contract's code. If the +// code can't be found in the cache, it checks existence with the **new** +// db scheme (with prefix).
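// Example (sketch): contract code now lives under the prefixed key
// codeKey(hash) = codePrefix + hash, with ReadCode falling back to the legacy
// bare-hash key; IsCodeKey recognises the new form again. The hash below is a
// hypothetical placeholder:
//
//	key := codeKey(common.HexToHash("0xc0de")) // "c" + 32-byte code hash
//	if ok, raw := IsCodeKey(key); ok {
//		codeHash := common.BytesToHash(raw) // recovers the original hash
//		_ = codeHash
//	}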
+func (db *cachingDB) ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error) { + if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 { + return code, nil + } + code := rawdb.ReadCodeWithPrefix(db.db.DiskDB(), codeHash) + if len(code) > 0 { + db.codeCache.Set(codeHash.Bytes(), code) db.codeSizeCache.Add(codeHash, len(code)) + return code, nil } - return code, err + return nil, errors.New("not found") } // ContractCodeSize retrieves a particular contracts code's size. diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go index 5060f7a651..d1afe9ca3e 100644 --- a/core/state/iterator_test.go +++ b/core/state/iterator_test.go @@ -28,6 +28,7 @@ import ( func TestNodeIteratorCoverage(t *testing.T) { // Create some arbitrary test state to iterate db, root, _ := makeTestState() + db.TrieDB().Commit(root, false, nil) state, err := New(root, db, nil) if err != nil { @@ -42,7 +43,10 @@ func TestNodeIteratorCoverage(t *testing.T) { } // Cross check the iterated hashes and the database/nodepool content for hash := range hashes { - if _, err := db.TrieDB().Node(hash); err != nil { + if _, err = db.TrieDB().Node(hash); err != nil { + _, err = db.ContractCode(common.Hash{}, hash) + } + if err != nil { t.Errorf("failed to retrieve reported node %x", hash) } } diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index c3a4a552ff..cf9b2b0393 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -54,9 +54,11 @@ type generatorStats struct { // Log creates an contextual log with the given message and the context pulled // from the internally maintained statistics. -func (gs *generatorStats) Log(msg string, marker []byte) { +func (gs *generatorStats) Log(msg string, root common.Hash, marker []byte) { var ctx []interface{} - + if root != (common.Hash{}) { + ctx = append(ctx, []interface{}{"root", root}...) 
+	}
 	// Figure out whether we're after or within an account
 	switch len(marker) {
 	case common.HashLength:
@@ -120,7 +122,7 @@ func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache i
 func (dl *diskLayer) generate(stats *generatorStats) {
 	// If a database wipe is in operation, wait until it's done
 	if stats.wiping != nil {
-		stats.Log("Wiper running, state snapshotting paused", dl.genMarker)
+		stats.Log("Wiper running, state snapshotting paused", common.Hash{}, dl.genMarker)
 		select {
 		// If wiper is done, resume normal mode of operation
 		case <-stats.wiping:
@@ -137,13 +139,13 @@ func (dl *diskLayer) generate(stats *generatorStats) {
 	accTrie, err := trie.NewSecure(dl.root, dl.triedb)
 	if err != nil {
 		// The account trie is missing (GC), surf the chain until one becomes available
-		stats.Log("Trie missing, state snapshotting paused", dl.genMarker)
+		stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
 		abort := <-dl.genAbort
 		abort <- stats
 		return
 	}
-	stats.Log("Resuming state snapshot generation", dl.genMarker)
+	stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)
 
 	var accMarker []byte
 	if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
@@ -192,7 +194,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {
 			dl.lock.Unlock()
 		}
 		if abort != nil {
-			stats.Log("Aborting state snapshot generation", accountHash[:])
+			stats.Log("Aborting state snapshot generation", dl.root, accountHash[:])
 			abort <- stats
 			return
 		}
@@ -230,7 +232,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {
 					dl.lock.Unlock()
 				}
 				if abort != nil {
-					stats.Log("Aborting state snapshot generation", append(accountHash[:], storeIt.Key...))
+					stats.Log("Aborting state snapshot generation", dl.root, append(accountHash[:], storeIt.Key...))
 					abort <- stats
 					return
 				}
@@ -238,7 +240,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {
 			}
 		}
 		if time.Since(logged) > 8*time.Second {
-			stats.Log("Generating state snapshot", accIt.Key)
+			stats.Log("Generating state snapshot", dl.root, accIt.Key)
 			logged = time.Now()
 		}
 		// Some account processed, unmark the marker
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 0e73454168..fc1053f818 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -193,7 +193,7 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 		dl.genAbort <- abort
 
 		if stats = <-abort; stats != nil {
-			stats.Log("Journalling in-progress snapshot", dl.genMarker)
+			stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker)
 		}
 	}
 	// Ensure the layer didn't get stale
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 8ea56d7314..f6c5a6a9a8 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -263,6 +263,13 @@ func (t *Tree) Cap(root common.Hash, layers int) error {
 	if !ok {
 		return fmt.Errorf("snapshot [%#x] is disk layer", root)
 	}
+	// If the generator is still running, use a more aggressive cap
+	diff.origin.lock.RLock()
+	if diff.origin.genMarker != nil && layers > 8 {
+		layers = 8
+	}
+	diff.origin.lock.RUnlock()
+
 	// Run the internal capping and discard all stale layers
 	t.lock.Lock()
 	defer t.lock.Unlock()
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 015a673781..26ab67e1ad 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -375,22 +375,21 @@ func (s *stateObject) CommitTrie(db Database) error {
 	return err
 }
 
-// AddBalance removes amount from c's balance.
+// AddBalance adds amount to s's balance.
 // It is used to add funds to the destination account of a transfer.
 func (s *stateObject) AddBalance(amount *big.Int) {
-	// EIP158: We must check emptiness for the objects such that the account
+	// EIP161: We must check emptiness for the objects such that the account
 	// clearing (0,0,0 objects) can take effect.
 	if amount.Sign() == 0 {
 		if s.empty() {
 			s.touch()
 		}
-
 		return
 	}
 	s.SetBalance(new(big.Int).Add(s.Balance(), amount))
 }
 
-// SubBalance removes amount from c's balance.
+// SubBalance removes amount from s's balance.
 // It is used to remove funds from the origin account of a transfer.
 func (s *stateObject) SubBalance(amount *big.Int) {
 	if amount.Sign() == 0 {
@@ -455,7 +454,7 @@ func (s *stateObject) Code(db Database) []byte {
 }
 
 // CodeSize returns the size of the contract code associated with this object,
-// or zero if none. This methos is an almost mirror of Code, but uses a cache
+// or zero if none. This method is an almost mirror of Code, but uses a cache
 // inside the database to avoid loading codes seen recently.
 func (s *stateObject) CodeSize(db Database) int {
 	if s.code != nil {
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 17fb755f54..4ac03df604 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -25,6 +25,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state/snapshot"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
@@ -42,9 +43,6 @@ type revision struct {
 var (
 	// emptyRoot is the known root hash of an empty trie.
 	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
-
-	// emptyCode is the known hash of the empty EVM bytecode.
-	emptyCode = crypto.Keccak256Hash(nil)
 )
 
 type proofList [][]byte
@@ -58,7 +56,7 @@ func (n *proofList) Delete(key []byte) error {
 	panic("not supported")
 }
 
-// StateDBs within the ethereum protocol are used to store anything
+// StateDB structs within the ethereum protocol are used to store anything
 // within the merkle trie. StateDBs take care of caching and storing
 // nested states. It's the general query interface to retrieve:
 // * Contracts
@@ -115,7 +113,7 @@ type StateDB struct {
 	SnapshotCommits time.Duration
 }
 
-// Create a new state from a given trie.
+// New creates a new state from a given trie.
 func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
 	tr, err := db.OpenTrie(root)
 	if err != nil {
@@ -250,7 +248,7 @@ func (s *StateDB) Empty(addr common.Address) bool {
 	return so == nil || so.empty()
 }
 
-// Retrieve the balance from the given address or 0 if object not found
+// GetBalance retrieves the balance from the given address, or 0 if the object is not found
 func (s *StateDB) GetBalance(addr common.Address) *big.Int {
 	stateObject := s.getStateObject(addr)
 	if stateObject != nil {
@@ -318,7 +316,7 @@ func (s *StateDB) GetProof(a common.Address) ([][]byte, error) {
 	return [][]byte(proof), err
 }
 
-// GetProof returns the StorageProof for given key
+// GetStorageProof returns the StorageProof for the given key
 func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
 	var proof proofList
 	trie := s.StorageTrie(a)
@@ -560,7 +558,7 @@ func (s *StateDB) setStateObject(object *stateObject) {
 	s.stateObjects[object.Address()] = object
 }
 
-// Retrieve a state object or create a new state object if nil.
+// GetOrNewStateObject retrieves a state object, or creates a new one if nil.
 func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
 	stateObject := s.getStateObject(addr)
 	if stateObject == nil {
@@ -589,7 +587,10 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject)
 		s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
 	}
 	s.setStateObject(newobj)
-	return newobj, prev
+	if prev != nil && !prev.deleted {
+		return newobj, prev
+	}
+	return newobj, nil
 }
 
 // CreateAccount explicitly creates a state object. If a state object with the address
@@ -817,11 +818,12 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 	s.IntermediateRoot(deleteEmptyObjects)
 
 	// Commit objects to the trie, measuring the elapsed time
+	codeWriter := s.db.TrieDB().DiskDB().NewBatch()
 	for addr := range s.stateObjectsDirty {
 		if obj := s.stateObjects[addr]; !obj.deleted {
 			// Write any contract code associated with the state object
 			if obj.code != nil && obj.dirtyCode {
-				s.db.TrieDB().InsertBlob(common.BytesToHash(obj.CodeHash()), obj.code)
+				rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
 				obj.dirtyCode = false
 			}
 			// Write any storage changes in the state object to its storage trie
@@ -833,6 +835,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 	if len(s.stateObjectsDirty) > 0 {
 		s.stateObjectsDirty = make(map[common.Address]struct{})
 	}
+	if codeWriter.ValueSize() > 0 {
+		if err := codeWriter.Write(); err != nil {
+			log.Crit("Failed to commit dirty codes", "error", err)
+		}
+	}
 	// Write the account trie changes, measuing the amount of wasted time
 	var start time.Time
 	if metrics.EnabledExpensive {
@@ -841,17 +848,13 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 	// The onleaf func is called _serially_, so we can reuse the same account
 	// for unmarshalling every time.
 	var account Account
-	root, err := s.trie.Commit(func(leaf []byte, parent common.Hash) error {
+	root, err := s.trie.Commit(func(path []byte, leaf []byte, parent common.Hash) error {
 		if err := rlp.DecodeBytes(leaf, &account); err != nil {
 			return nil
 		}
 		if account.Root != emptyRoot {
 			s.db.TrieDB().Reference(account.Root, parent)
 		}
-		code := common.BytesToHash(account.CodeHash)
-		if code != emptyCode {
-			s.db.TrieDB().Reference(code, parent)
-		}
 		return nil
 	})
 	if metrics.EnabledExpensive {
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 824a597498..36ff271331 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -144,7 +144,7 @@ func TestIntermediateLeaks(t *testing.T) {
 	}
 }
 
-// TestCopy tests that copying a statedb object indeed makes the original and
+// TestCopy tests that copying a StateDB object indeed makes the original and
 // the copy independent of each other. This test is a regression test against
 // https://github.com/ethereum/go-ethereum/pull/15549.
 func TestCopy(t *testing.T) {
@@ -647,11 +647,11 @@ func TestCopyCopyCommitCopy(t *testing.T) {
 	}
 }
 
 // TestDeleteCreateRevert tests a weird state transition corner case that we hit
-// while changing the internals of statedb. The workflow is that a contract is
-// self destructed, then in a followup transaction (but same block) it's created
+// while changing the internals of StateDB. The workflow is that a contract is
+// self-destructed, then in a follow-up transaction (but same block) it's created
 // again and the transaction reverted.
 //
-// The original statedb implementation flushed dirty objects to the tries after
+// The original StateDB implementation flushed dirty objects to the tries after
 // each transaction, so this works ok. The rework accumulated writes in memory
 // first, but the journal wiped the entire state object on create-revert.
 func TestDeleteCreateRevert(t *testing.T) {
@@ -681,7 +681,7 @@ func TestDeleteCreateRevert(t *testing.T) {
 	}
 }
 
-// TestMissingTrieNodes tests that if the statedb fails to load parts of the trie,
+// TestMissingTrieNodes tests that if the StateDB fails to load parts of the trie,
 // the Commit operation fails with an error
 // If we are missing trie nodes, we should not continue writing to the trie
 func TestMissingTrieNodes(t *testing.T) {
diff --git a/core/state/sync.go b/core/state/sync.go
index ef79305273..1018b78e5e 100644
--- a/core/state/sync.go
+++ b/core/state/sync.go
@@ -28,13 +28,13 @@ import (
 // NewStateSync create a new state trie download scheduler.
 func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom) *trie.Sync {
 	var syncer *trie.Sync
-	callback := func(leaf []byte, parent common.Hash) error {
+	callback := func(path []byte, leaf []byte, parent common.Hash) error {
 		var obj Account
 		if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
 			return err
 		}
-		syncer.AddSubTrie(obj.Root, 64, parent, nil)
-		syncer.AddRawEntry(common.BytesToHash(obj.CodeHash), 64, parent)
+		syncer.AddSubTrie(obj.Root, path, parent, nil)
+		syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), path, parent)
 		return nil
 	}
 	syncer = trie.NewSync(root, database, callback, bloom)
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index 924c8c2f90..17670750ed 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -133,13 +133,17 @@ func TestEmptyStateSync(t *testing.T) {
 
 // Tests that given a root hash, a state can sync iteratively on a single thread,
 // requesting retrieval tasks and returning all of them in one go.
-func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) }
-func TestIterativeStateSyncBatched(t *testing.T)    { testIterativeStateSync(t, 100) }
+func TestIterativeStateSyncIndividual(t *testing.T)         { testIterativeStateSync(t, 1, false) }
+func TestIterativeStateSyncBatched(t *testing.T)            { testIterativeStateSync(t, 100, false) }
+func TestIterativeStateSyncIndividualFromDisk(t *testing.T) { testIterativeStateSync(t, 1, true) }
+func TestIterativeStateSyncBatchedFromDisk(t *testing.T)    { testIterativeStateSync(t, 100, true) }
 
-func testIterativeStateSync(t *testing.T, count int) {
+func testIterativeStateSync(t *testing.T, count int, commit bool) {
 	// Create a random state to copy
 	srcDb, srcRoot, srcAccounts := makeTestState()
-
+	if commit {
+		srcDb.TrieDB().Commit(srcRoot, false, nil)
+	}
 	// Create a destination state and sync with the scheduler
 	dstDb := rawdb.NewMemoryDatabase()
 	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))
@@ -149,13 +153,18 @@ func testIterativeStateSync(t *testing.T, count int) {
 		results := make([]trie.SyncResult, len(queue))
 		for i, hash := range queue {
 			data, err := srcDb.TrieDB().Node(hash)
+			if err != nil {
+				data, err = srcDb.ContractCode(common.Hash{}, hash)
+			}
 			if err != nil {
 				t.Fatalf("failed to retrieve node data for %x", hash)
 			}
 			results[i] = trie.SyncResult{Hash: hash, Data: data}
 		}
-		if _, index, err := sched.Process(results); err != nil {
-			t.Fatalf("failed to process result #%d: %v", index, err)
+		for _, result := range results {
+			if err := sched.Process(result); err != nil {
+				t.Fatalf("failed to process result %v", err)
+			}
 		}
 		batch := dstDb.NewBatch()
 		if err := sched.Commit(batch); err != nil {
@@ -184,13 +193,18 @@ func TestIterativeDelayedStateSync(t *testing.T) {
 		results := make([]trie.SyncResult, len(queue)/2+1)
 		for i, hash := range queue[:len(results)] {
 			data, err := srcDb.TrieDB().Node(hash)
+			if err != nil {
+				data, err = srcDb.ContractCode(common.Hash{}, hash)
+			}
 			if err != nil {
 				t.Fatalf("failed to retrieve node data for %x", hash)
 			}
 			results[i] = trie.SyncResult{Hash: hash, Data: data}
 		}
-		if _, index, err := sched.Process(results); err != nil {
-			t.Fatalf("failed to process result #%d: %v", index, err)
+		for _, result := range results {
+			if err := sched.Process(result); err != nil {
+				t.Fatalf("failed to process result %v", err)
+			}
 		}
 		batch := dstDb.NewBatch()
 		if err := sched.Commit(batch); err != nil {
@@ -226,14 +240,19 @@ func testIterativeRandomStateSync(t *testing.T, count int) {
 		results := make([]trie.SyncResult, 0, len(queue))
 		for hash := range queue {
 			data, err := srcDb.TrieDB().Node(hash)
+			if err != nil {
+				data, err = srcDb.ContractCode(common.Hash{}, hash)
+			}
 			if err != nil {
 				t.Fatalf("failed to retrieve node data for %x", hash)
 			}
 			results = append(results, trie.SyncResult{Hash: hash, Data: data})
 		}
 		// Feed the retrieved results back and queue new tasks
-		if _, index, err := sched.Process(results); err != nil {
-			t.Fatalf("failed to process result #%d: %v", index, err)
+		for _, result := range results {
+			if err := sched.Process(result); err != nil {
+				t.Fatalf("failed to process result %v", err)
+			}
 		}
 		batch := dstDb.NewBatch()
 		if err := sched.Commit(batch); err != nil {
@@ -270,6 +289,9 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
 			delete(queue, hash)
 
 			data, err := srcDb.TrieDB().Node(hash)
+			if err != nil {
+				data, err = srcDb.ContractCode(common.Hash{}, hash)
+			}
 			if err != nil {
 				t.Fatalf("failed to retrieve node data for %x", hash)
 			}
@@ -280,8 +302,10 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
 		}
 	}
 	// Feed the retrieved results back and queue new tasks
-	if _, index, err := sched.Process(results); err != nil {
-		t.Fatalf("failed to process result #%d: %v", index, err)
+	for _, result := range results {
+		if err := sched.Process(result); err != nil {
+			t.Fatalf("failed to process result %v", err)
+		}
 	}
 	batch := dstDb.NewBatch()
 	if err := sched.Commit(batch); err != nil {
@@ -302,6 +326,15 @@ func TestIncompleteStateSync(t *testing.T) {
 	// Create a random state to copy
 	srcDb, srcRoot, srcAccounts := makeTestState()
 
+	// isCode reports whether the hash is a contract code hash.
+	isCode := func(hash common.Hash) bool {
+		for _, acc := range srcAccounts {
+			if hash == crypto.Keccak256Hash(acc.code) {
+				return true
+			}
+		}
+		return false
+	}
 	checkTrieConsistency(srcDb.TrieDB().DiskDB().(ethdb.Database), srcRoot)
 
 	// Create a destination state and sync with the scheduler
@@ -315,14 +348,19 @@ func TestIncompleteStateSync(t *testing.T) {
 		results := make([]trie.SyncResult, len(queue))
 		for i, hash := range queue {
 			data, err := srcDb.TrieDB().Node(hash)
+			if err != nil {
+				data, err = srcDb.ContractCode(common.Hash{}, hash)
+			}
 			if err != nil {
 				t.Fatalf("failed to retrieve node data for %x", hash)
 			}
 			results[i] = trie.SyncResult{Hash: hash, Data: data}
 		}
 		// Process each of the state nodes
-		if _, index, err := sched.Process(results); err != nil {
-			t.Fatalf("failed to process result #%d: %v", index, err)
+		for _, result := range results {
+			if err := sched.Process(result); err != nil {
+				t.Fatalf("failed to process result %v", err)
+			}
 		}
 		batch := dstDb.NewBatch()
 		if err := sched.Commit(batch); err != nil {
@@ -333,12 +371,9 @@ func TestIncompleteStateSync(t *testing.T) {
 			added = append(added, result.Hash)
 		}
 		// Check that all known sub-tries added so far are complete or missing entirely.
-	checkSubtries:
 		for _, hash := range added {
-			for _, acc := range srcAccounts {
-				if hash == crypto.Keccak256Hash(acc.code) {
-					continue checkSubtries // skip trie check of code nodes.
-				}
+			if isCode(hash) {
+				continue
 			}
 			// Can't use checkStateConsistency here because subtrie keys may have odd
 			// length and crash in LeafKey.
@@ -351,13 +386,25 @@ func TestIncompleteStateSync(t *testing.T) {
 	}
 	// Sanity check that removing any node from the database is detected
 	for _, node := range added[1:] {
-		key := node.Bytes()
-		value, _ := dstDb.Get(key)
-
-		dstDb.Delete(key)
+		var (
+			key  = node.Bytes()
+			code = isCode(node)
+			val  []byte
+		)
+		if code {
+			val = rawdb.ReadCode(dstDb, node)
+			rawdb.DeleteCode(dstDb, node)
+		} else {
+			val = rawdb.ReadTrieNode(dstDb, node)
+			rawdb.DeleteTrieNode(dstDb, node)
+		}
 		if err := checkStateConsistency(dstDb, added[0]); err == nil {
 			t.Fatalf("trie inconsistency not caught, missing: %x", key)
 		}
-		dstDb.Put(key, value)
+		if code {
+			rawdb.WriteCode(dstDb, node, val)
+		} else {
+			rawdb.WriteTrieNode(dstDb, node, val)
+		}
 	}
 }
diff --git a/core/tx_pool.go b/core/tx_pool.go
index 1bb2440523..b3a6342173 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -1429,6 +1429,8 @@ func (pool *TxPool) demoteUnexecutables() {
 				pool.enqueueTx(hash, tx)
 			}
 			pendingGauge.Dec(int64(len(gapped)))
+			// This might happen in a reorg, so log it to the metering
+			blockReorgInvalidatedTx.Mark(int64(len(gapped)))
 		}
 		// Delete the entire pending entry if it became empty.
 		if list.Empty() {
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index c270246efb..b8c61dc418 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -35,6 +35,7 @@ import (
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/params/vars"
+	"github.com/ethereum/go-ethereum/trie"
 )
 
 // testTxPoolConfig is a transaction pool configuration without stateful disk
@@ -55,7 +56,7 @@ type testBlockChain struct {
 func (bc *testBlockChain) CurrentBlock() *types.Block {
 	return types.NewBlock(&types.Header{
 		GasLimit: bc.gasLimit,
-	}, nil, nil, nil)
+	}, nil, nil, nil, new(trie.Trie))
 }
 
 func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
diff --git a/core/types/block.go b/core/types/block.go
index 8316cd7f3a..8096ebb755 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -35,7 +35,7 @@ import (
 )
 
 var (
-	EmptyRootHash  = DeriveSha(Transactions{})
+	EmptyRootHash  = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
 	EmptyUncleHash = rlpHash([]*Header(nil))
 )
@@ -221,14 +221,14 @@ type storageblock struct {
 // The values of TxHash, UncleHash, ReceiptHash and Bloom in header
 // are ignored and set to values derived from the given txs, uncles
 // and receipts.
-func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block {
+func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher Hasher) *Block {
 	b := &Block{header: CopyHeader(header), td: new(big.Int)}
 
 	// TODO: panic if len(txs) != len(receipts)
 	if len(txs) == 0 {
 		b.header.TxHash = EmptyRootHash
 	} else {
-		b.header.TxHash = DeriveSha(Transactions(txs))
+		b.header.TxHash = DeriveSha(Transactions(txs), hasher)
 		b.transactions = make(Transactions, len(txs))
 		copy(b.transactions, txs)
 	}
@@ -236,7 +236,7 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
 	if len(receipts) == 0 {
 		b.header.ReceiptHash = EmptyRootHash
 	} else {
-		b.header.ReceiptHash = DeriveSha(Receipts(receipts))
+		b.header.ReceiptHash = DeriveSha(Receipts(receipts), hasher)
 		b.header.Bloom = CreateBloom(receipts)
 	}
diff --git a/core/types/block_test.go b/core/types/block_test.go
index 46ad00c6eb..4dfdcf9545 100644
--- a/core/types/block_test.go
+++ b/core/types/block_test.go
@@ -18,6 +18,7 @@ package types
 
 import (
 	"bytes"
+	"hash"
 	"math/big"
 	"reflect"
 	"testing"
@@ -27,6 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
+	"golang.org/x/crypto/sha3"
 )
 
 // from bcValidBlockTest.json, "SimpleTx"
@@ -90,6 +92,30 @@ func BenchmarkEncodeBlock(b *testing.B) {
 	}
 }
 
+// testHasher is a helper for transaction/receipt list hashing. The original
+// hasher is a trie; to avoid an import cycle, the tests use this stand-in
+// hasher instead.
+type testHasher struct {
+	hasher hash.Hash
+}
+
+func newHasher() *testHasher {
+	return &testHasher{hasher: sha3.NewLegacyKeccak256()}
+}
+
+func (h *testHasher) Reset() {
+	h.hasher.Reset()
+}
+
+func (h *testHasher) Update(key, val []byte) {
+	h.hasher.Write(key)
+	h.hasher.Write(val)
+}
+
+func (h *testHasher) Hash() common.Hash {
+	return common.BytesToHash(h.hasher.Sum(nil))
+}
+
 func makeBenchBlock() *Block {
 	var (
 		key, _ = crypto.GenerateKey()
@@ -128,5 +154,5 @@ func makeBenchBlock() *Block {
 			Extra:  []byte("benchmark uncle"),
 		}
 	}
-	return NewBlock(header, txs, uncles, receipts)
+	return NewBlock(header, txs, uncles, receipts, newHasher())
 }
diff --git a/core/types/derive_sha.go b/core/types/derive_sha.go
index 00c42c5bc6..7d40c7f660 100644
--- a/core/types/derive_sha.go
+++ b/core/types/derive_sha.go
@@ -21,21 +21,28 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/rlp"
-	"github.com/ethereum/go-ethereum/trie"
 )
 
+// DerivableList is the interface a list must implement to have its hash derived.
 type DerivableList interface {
 	Len() int
 	GetRlp(i int) []byte
 }
 
-func DeriveSha(list DerivableList) common.Hash {
+// Hasher is the tool used to calculate the hash of a derivable list.
+type Hasher interface {
+	Reset()
+	Update([]byte, []byte)
+	Hash() common.Hash
+}
+
+func DeriveSha(list DerivableList, hasher Hasher) common.Hash {
+	hasher.Reset()
 	keybuf := new(bytes.Buffer)
-	trie := new(trie.Trie)
 	for i := 0; i < list.Len(); i++ {
 		keybuf.Reset()
 		rlp.Encode(keybuf, uint(i))
-		trie.Update(keybuf.Bytes(), list.GetRlp(i))
+		hasher.Update(keybuf.Bytes(), list.GetRlp(i))
 	}
-	return trie.Hash()
+	return hasher.Hash()
 }
diff --git a/eth/api.go b/eth/api.go
index 3e0955fe51..6103ed4a04 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -354,7 +354,7 @@ func (api *PrivateDebugAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs,
 // AccountRangeMaxResults is the maximum number of results to be returned per call
 const AccountRangeMaxResults = 256
 
-// AccountRangeAt enumerates all accounts in the given block and start point in paging request
+// AccountRange enumerates all accounts in the given block and start point in paging request
 func (api *PublicDebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start []byte, maxResults int, nocode, nostorage, incompletes bool) (state.IteratorDump, error) {
 	var stateDb *state.StateDB
 	var err error
diff --git a/eth/api_tracer.go b/eth/api_tracer.go
index 5964c7a8b5..b79df0e2b9 100644
--- a/eth/api_tracer.go
+++ b/eth/api_tracer.go
@@ -401,7 +401,7 @@ func (api *PrivateDebugAPI) TraceBlockFromFile(ctx context.Context, file string,
 	return api.TraceBlock(ctx, blob, config)
 }
 
-// TraceBadBlockByHash returns the structured logs created during the execution of
+// TraceBadBlock returns the structured logs created during the execution of
 // EVM against a block pulled from the pool of bad ones and returns them as a JSON
 // object.
 func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
diff --git a/eth/bloombits.go b/eth/bloombits.go
index f8b77f9cff..bd34bd7b69 100644
--- a/eth/bloombits.go
+++ b/eth/bloombits.go
@@ -137,7 +137,7 @@ func (b *BloomIndexer) Commit() error {
 	return batch.Write()
 }
 
-// PruneSections returns an empty error since we don't support pruning here.
+// Prune returns an empty error since we don't support pruning here.
 func (b *BloomIndexer) Prune(threshold uint64) error {
 	return nil
 }
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 5eb0549c1e..f82b501181 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -111,7 +111,7 @@ type Downloader struct {
 	peers   *peerSet       // Set of active peers from which download can proceed
 	stateDB ethdb.Database // Database to state sync into (and deduplicate via)
 
-	stateBloom *trie.SyncBloom // Bloom filter for fast trie node existence checks
+	stateBloom *trie.SyncBloom // Bloom filter for fast trie node and contract code existence checks
 
 	// Statistics
 	syncStatsChainOrigin uint64 // Origin block number where syncing started at
@@ -178,8 +178,8 @@ type LightChain interface {
 	// InsertHeaderChain inserts a batch of headers into the local chain.
 	InsertHeaderChain([]*types.Header, int) (int, error)
 
-	// Rollback removes a few recently added elements from the local chain.
-	Rollback([]common.Hash)
+	// SetHead rewinds the local chain to a new head.
+	SetHead(uint64) error
 }
 
 // BlockChain encapsulates functions required to sync a (full or fast) blockchain.
@@ -285,6 +285,15 @@ func (d *Downloader) Synchronising() bool {
 	return atomic.LoadInt32(&d.synchronising) > 0
 }
 
+// SyncBloomContains tests if the syncbloom filter contains the given hash:
+//   - false: the bloom definitely does not contain the hash
+//   - true:  the bloom may contain the hash
+//
+// While the bloom is being initialized (or is closed), all queries will return true.
+func (d *Downloader) SyncBloomContains(hash []byte) bool {
+	return d.stateBloom == nil || d.stateBloom.Contains(hash)
+}
+
 // RegisterPeer injects a new download peer into the set of block source to be
 // used for fetching hashes and blocks from.
 func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
@@ -477,6 +486,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
 			if pivot <= origin {
 				origin = pivot - 1
 			}
+			// Write out the pivot into the database so a rollback beyond it will
+			// re-enable fast sync
+			rawdb.WriteLastPivotNumber(d.stateDB, pivot)
 		}
 	}
 	d.committed = 1
@@ -504,6 +516,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
 			d.ancientLimit = height - fullMaxForkAncestry - 1
 		}
 		frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here.
+
 		// If a part of blockchain data has already been written into active store,
 		// disable the ancient style insertion explicitly.
 		if origin >= frozen && frozen != 0 {
@@ -514,11 +527,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
 		}
 		// Rewind the ancient store and blockchain if reorg happens.
 		if origin+1 < frozen {
-			var hashes []common.Hash
-			for i := origin + 1; i < d.lightchain.CurrentHeader().Number.Uint64(); i++ {
-				hashes = append(hashes, rawdb.ReadCanonicalHash(d.stateDB, i))
+			if err := d.lightchain.SetHead(origin + 1); err != nil {
+				return err
 			}
-			d.lightchain.Rollback(hashes)
 		}
 	}
 	// Initiate the sync using a concurrent header and content retrieval algorithm
@@ -1428,35 +1439,32 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
 func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
 	// Keep a count of uncertain headers to roll back
 	var (
-		rollback    []*types.Header
+		rollback    uint64 // Zero means no rollback (fine as you can't unroll the genesis)
 		rollbackErr error
 		mode        = d.getMode()
 	)
 	defer func() {
-		if len(rollback) > 0 {
-			// Flatten the headers and roll them back
-			hashes := make([]common.Hash, len(rollback))
-			for i, header := range rollback {
-				hashes[i] = header.Hash()
-			}
+		if rollback > 0 {
 			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
 			if mode != LightSync {
 				lastFastBlock = d.blockchain.CurrentFastBlock().Number()
 				lastBlock = d.blockchain.CurrentBlock().Number()
 			}
-			d.lightchain.Rollback(hashes)
+			if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
+				// We're already unwinding the stack, only print the error to make it more visible
+				log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err)
+			}
 			curFastBlock, curBlock := common.Big0, common.Big0
 			if mode != LightSync {
 				curFastBlock = d.blockchain.CurrentFastBlock().Number()
 				curBlock = d.blockchain.CurrentBlock().Number()
 			}
-			log.Warn("Rolled back headers", "count", len(hashes),
+			log.Warn("Rolled back chain segment",
 				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
 				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
 				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr)
 		}
 	}()
-
 	// Wait for batches of headers to process
 	gotHeaders := false
@@ -1508,7 +1516,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
 				}
 			}
 			// Disable any rollback and return
-			rollback = nil
+			rollback = 0
 			return nil
 		}
 		// Otherwise split the chunk of headers into batches and process them
@@ -1527,15 +1535,9 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
 				limit = len(headers)
 			}
 			chunk := headers[:limit]
+			// In case of header-only syncing, validate the chunk immediately
 			if mode == FastSync || mode == LightSync {
-				// Collect the yet unknown headers to mark them as uncertain
-				unknown := make([]*types.Header, 0, len(chunk))
-				for _, header := range chunk {
-					if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
-						unknown = append(unknown, header)
-					}
-				}
 				// If we're importing pure headers, verify based on their recentness
 				frequency := fsHeaderCheckFrequency
 				if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
@@ -1543,17 +1545,20 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
 				}
 				if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
 					rollbackErr = err
-					// If some headers were inserted, add them too to the rollback list
-					if n > 0 {
-						rollback = append(rollback, chunk[:n]...)
+
+					// If some headers were inserted, track them as uncertain
+					if n > 0 && rollback == 0 {
+						rollback = chunk[0].Number.Uint64()
 					}
 					log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
 					return fmt.Errorf("%w: %v", errInvalidChain, err)
 				}
-				// All verifications passed, store newly found uncertain headers
-				rollback = append(rollback, unknown...)
-				if len(rollback) > fsHeaderSafetyNet {
-					rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
+				// All verifications passed, track all headers within the allotted limits
+				head := chunk[len(chunk)-1].Number.Uint64()
+				if head-rollback > uint64(fsHeaderSafetyNet) {
+					rollback = head - uint64(fsHeaderSafetyNet)
+				} else {
+					rollback = 1
 				}
 			}
 			// Unless we're doing light chains, schedule the headers for associated content retrieval
@@ -1652,13 +1657,20 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
 	// Start syncing state of the reported head block. This should get us most of
 	// the state of the pivot block.
 	sync := d.syncState(latest.Root)
-	defer sync.Cancel()
+	defer func() {
+		// The `sync` object is replaced every time the pivot moves. We need to
+		// defer close the very last active one, hence the lazy evaluation vs.
+		// calling defer sync.Cancel() !!!
+		sync.Cancel()
+	}()
+
 	closeOnErr := func(s *stateSync) {
 		if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled {
 			d.queue.Close() // wake up Results
 		}
 	}
 	go closeOnErr(sync)
+
 	// Figure out the ideal pivot block. Note, that this goalpost may move if the
 	// sync takes long enough for the chain head to move significantly.
 	pivot := uint64(0)
@@ -1700,6 +1712,10 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
 		if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
 			log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
 			pivot = height - uint64(fsMinFullBlocks)
+
+			// Write out the pivot into the database so a rollback beyond it will
+			// re-enable fast sync
+			rawdb.WriteLastPivotNumber(d.stateDB, pivot)
 		}
 	}
 	P, beforeP, afterP := splitAroundPivot(pivot, results)
@@ -1710,9 +1726,8 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
 		// If new pivot block found, cancel old state retrieval and restart
 		if oldPivot != P {
 			sync.Cancel()
-
 			sync = d.syncState(P.Header.Root)
-			defer sync.Cancel()
+
 			go closeOnErr(sync)
 			oldPivot = P
 		}
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index e774b2b89d..7c165c63c3 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -341,25 +341,52 @@ func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []typ
 	return len(blocks), nil
 }
 
-// Rollback removes some recently added elements from the chain.
-func (dl *downloadTester) Rollback(hashes []common.Hash) {
+// SetHead rewinds the local chain to a new head.
+func (dl *downloadTester) SetHead(head uint64) error {
 	dl.lock.Lock()
 	defer dl.lock.Unlock()
 
-	for i := len(hashes) - 1; i >= 0; i-- {
-		if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
-			dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
+	// Find the hash of the head to reset to
+	var hash common.Hash
+	for h, header := range dl.ownHeaders {
+		if header.Number.Uint64() == head {
+			hash = h
+		}
+	}
+	for h, header := range dl.ancientHeaders {
+		if header.Number.Uint64() == head {
+			hash = h
+		}
+	}
+	if hash == (common.Hash{}) {
+		return fmt.Errorf("unknown head to set: %d", head)
+	}
+	// Find the offset in the header chain
+	var offset int
+	for o, h := range dl.ownHashes {
+		if h == hash {
+			offset = o
+			break
 		}
-		delete(dl.ownChainTd, hashes[i])
-		delete(dl.ownHeaders, hashes[i])
-		delete(dl.ownReceipts, hashes[i])
-		delete(dl.ownBlocks, hashes[i])
+	}
+	// Remove all the hashes and associated data afterwards
+	for i := offset + 1; i < len(dl.ownHashes); i++ {
+		delete(dl.ownChainTd, dl.ownHashes[i])
+		delete(dl.ownHeaders, dl.ownHashes[i])
+		delete(dl.ownReceipts, dl.ownHashes[i])
+		delete(dl.ownBlocks, dl.ownHashes[i])
 
-		delete(dl.ancientChainTd, hashes[i])
-		delete(dl.ancientHeaders, hashes[i])
-		delete(dl.ancientReceipts, hashes[i])
-		delete(dl.ancientBlocks, hashes[i])
+		delete(dl.ancientChainTd, dl.ownHashes[i])
+		delete(dl.ancientHeaders, dl.ownHashes[i])
+		delete(dl.ancientReceipts, dl.ownHashes[i])
+		delete(dl.ancientBlocks, dl.ownHashes[i])
 	}
+	dl.ownHashes = dl.ownHashes[:offset+1]
+	return nil
+}
+
+// Rollback removes some recently added elements from the chain.
+func (dl *downloadTester) Rollback(hashes []common.Hash) {
 }
 
 // newPeer registers a new block download source into the downloader.
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 87225cb625..aba4d5dbf7 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -31,6 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/trie"
 )
 
 const (
@@ -771,7 +772,7 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLi
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
 	validate := func(index int, header *types.Header) error {
-		if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash {
+		if types.DeriveSha(types.Transactions(txLists[index]), new(trie.Trie)) != header.TxHash {
 			return errInvalidBody
 		}
 		if types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
@@ -796,7 +797,7 @@ func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int,
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
 	validate := func(index int, header *types.Header) error {
-		if types.DeriveSha(types.Receipts(receiptList[index])) != header.ReceiptHash {
+		if types.DeriveSha(types.Receipts(receiptList[index]), new(trie.Trie)) != header.ReceiptHash {
 			return errInvalidReceipt
 		}
 		return nil
diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go
index 25c8fccb5b..bf9e96fe2a 100644
--- a/eth/downloader/statesync.go
+++ b/eth/downloader/statesync.go
@@ -474,7 +474,7 @@ func (s *stateSync) process(req *stateReq) (int, error) {
 
 	// Iterate over all the delivered data and inject one-by-one into the trie
 	for _, blob := range req.response {
-		_, hash, err := s.processNodeData(blob)
+		hash, err := s.processNodeData(blob)
 		switch err {
 		case nil:
 			s.numUncommitted++
@@ -512,13 +512,13 @@ func (s *stateSync) process(req *stateReq) (int, error) {
 
 // processNodeData tries to inject a trie node data blob delivered from a remote
 // peer into the state trie, returning whether anything useful was written or any
 // error occurred.
-func (s *stateSync) processNodeData(blob []byte) (bool, common.Hash, error) {
+func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) {
 	res := trie.SyncResult{Data: blob}
 	s.keccak.Reset()
 	s.keccak.Write(blob)
 	s.keccak.Sum(res.Hash[:0])
-	committed, _, err := s.sched.Process([]trie.SyncResult{res})
-	return committed, res.Hash, err
+	err := s.sched.Process(res)
+	return res.Hash, err
 }
 
 // updateStats bumps the various state sync progress counters and displays a log
diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go
index 2c2dabad96..270aaf5918 100644
--- a/eth/fetcher/block_fetcher.go
+++ b/eth/fetcher/block_fetcher.go
@@ -28,6 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/trie"
 )
 
 const (
@@ -540,7 +541,7 @@ func (f *BlockFetcher) loop() {
 					announce.time = task.time
 
 					// If the block is empty (header only), short circuit into the final import queue
-					if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
+					if header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash {
 						log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
 
 						block := types.NewBlockWithHeader(header)
@@ -619,7 +620,7 @@ func (f *BlockFetcher) loop() {
 						continue
 					}
 					if txnHash == (common.Hash{}) {
-						txnHash = types.DeriveSha(types.Transactions(task.transactions[i]))
+						txnHash = types.DeriveSha(types.Transactions(task.transactions[i]), new(trie.Trie))
 					}
 					if txnHash != announce.header.TxHash {
 						continue
diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go
index 531acf1482..b19db24083 100644
--- a/eth/fetcher/block_fetcher_test.go
+++ b/eth/fetcher/block_fetcher_test.go
@@ -32,6 +32,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/params/vars"
+	"github.com/ethereum/go-ethereum/trie"
 )
 
 var (
@@ -39,7 +40,7 @@ var (
 	testKey, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 	testAddress  = crypto.PubkeyToAddress(testKey.PublicKey)
 	genesis      = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
-	unknownBlock = types.NewBlock(&types.Header{GasLimit: vars.GenesisGasLimit}, nil, nil, nil)
+	unknownBlock = types.NewBlock(&types.Header{GasLimit: vars.GenesisGasLimit}, nil, nil, nil, new(trie.Trie))
 )
 
 // makeChain creates a chain of n blocks starting at and including parent.
diff --git a/eth/handler.go b/eth/handler.go
index 3093c5b54e..0b300f5d9a 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -607,7 +607,18 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 				return errResp(ErrDecode, "msg %v: %v", msg, err)
 			}
 			// Retrieve the requested state entry, stopping if enough was found
-			if entry, err := pm.blockchain.TrieNode(hash); err == nil {
+			// TODO: code and trie nodes are currently mixed at the protocol level;
+			// separate these two types.
+			if !pm.downloader.SyncBloomContains(hash[:]) {
+				// Only lookup the trie node if there's a chance that we actually have it
+				continue
+			}
+			entry, err := pm.blockchain.TrieNode(hash)
+			if len(entry) == 0 || err != nil {
+				// Read the contract code with prefix only to save unnecessary lookups.
+				entry, err = pm.blockchain.ContractCodeWithPrefix(hash)
+			}
+			if err == nil && len(entry) > 0 {
 				data = append(data, entry)
 				bytes += len(entry)
 			}
@@ -702,7 +713,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			log.Warn("Propagated block has invalid uncles", "have", hash, "exp", request.Block.UncleHash())
 			break // TODO(karalabe): return error eventually, but wait a few releases
 		}
-		if hash := types.DeriveSha(request.Block.Transactions()); hash != request.Block.TxHash() {
+		if hash := types.DeriveSha(request.Block.Transactions(), new(trie.Trie)); hash != request.Block.TxHash() {
 			log.Warn("Propagated block has invalid body", "have", hash, "exp", request.Block.TxHash())
 			break // TODO(karalabe): return error eventually, but wait a few releases
 		}
diff --git a/eth/sync.go b/eth/sync.go
index 0982a9702d..26badd1e21 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -271,15 +271,25 @@ func peerToSyncOp(mode downloader.SyncMode, p *peer) *chainSyncOp {
 }
 
 func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
+	// If we're in fast sync mode, return that directly
 	if atomic.LoadUint32(&cs.pm.fastSync) == 1 {
 		block := cs.pm.blockchain.CurrentFastBlock()
 		td := cs.pm.blockchain.GetTdByHash(block.Hash())
 		return downloader.FastSync, td
-	} else {
-		head := cs.pm.blockchain.CurrentHeader()
-		td := cs.pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
-		return downloader.FullSync, td
 	}
+	// We are probably in full sync, but we might have rewound to before the
+	// fast sync pivot, check if we should re-enable
+	if pivot := rawdb.ReadLastPivotNumber(cs.pm.chaindb); pivot != nil {
+		if head := cs.pm.blockchain.CurrentBlock(); head.NumberU64() < *pivot {
+			block := cs.pm.blockchain.CurrentFastBlock()
+			td := cs.pm.blockchain.GetTdByHash(block.Hash())
+			return downloader.FastSync, td
+		}
+	}
+	// Nope, we're really full syncing
+	head := cs.pm.blockchain.CurrentHeader()
+	td := cs.pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
+	return downloader.FullSync, td
 }
 
 // startSync launches doSync in a new goroutine.
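Note on the downloader and sync hunks above: they hinge on persisting the fast-sync pivot so that a rollback below it can re-enable fast sync on the next cycle. The following is a minimal Go sketch of that round trip, not part of the patch; it assumes a db (ethdb.Database), a pivot and a head block number in scope, and uses only the rawdb helpers that appear in the hunks above:

	// Record the pivot whenever it is chosen or moves
	// (see syncWithPeer and processFastSyncContent above).
	rawdb.WriteLastPivotNumber(db, pivot)

	// On the next sync-mode decision, resume fast sync if the chain was
	// rewound below the recorded pivot, since the state for those blocks
	// was never downloaded (see modeAndLocalHead above).
	mode := downloader.FullSync
	if p := rawdb.ReadLastPivotNumber(db); p != nil && head < *p {
		mode = downloader.FastSync
	}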
diff --git a/eth/tracers/internal/tracers/assets.go b/eth/tracers/internal/tracers/assets.go
index d0a0bf7c1a..c2da1ed1f8 100644
--- a/eth/tracers/internal/tracers/assets.go
+++ b/eth/tracers/internal/tracers/assets.go
@@ -2,8 +2,8 @@
 // sources:
 // 4byte_tracer.js (2.933kB)
 // bigram_tracer.js (1.712kB)
-// call_tracer.js (8.643kB)
-// evmdis_tracer.js (4.194kB)
+// call_tracer.js (8.704kB)
+// evmdis_tracer.js (4.195kB)
 // noop_tracer.js (1.271kB)
 // opcount_tracer.js (1.372kB)
 // prestate_tracer.js (4.234kB)
@@ -117,7 +117,7 @@ func bigram_tracerJs() (*asset, error) {
 	return a, nil
 }
 
-var _call_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff...")
+var _call_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff...")

[go-bindata regenerated the gzip-compressed byte blobs embedding call_tracer.js; the binary data is elided here, and the source is truncated partway through the new blob.]
6b\x51\xd2\xb8\x92\x15\xfa\xba\x84\xa5\x70\x90\x96\x5a\xac\x65\xe2\x82\x3c\x6e\x70\x2d\x2e\x85\x65\xb1\x16\x7f\x2f\xd0\xd1\xec\x43\x40\x16\x89\x2f\x84\x52\x25\x2c\x25\x0d\x30\xc4\xdd\x7b\xf9\x6a\x3c\x06\xe7\x65\x8e\x3a\x1d\xc0\x0f\xaf\x46\x3f\x7c\x0f\xb6\x50\xd8\x1f\x76\x1b\x69\xbc\x3e\x6a\xf4\x06\x2d\x44\xf4\xbc\xc6\xdc\xaf\x7a\x7d\xf8\xe9\x9e\x7a\x70\x4f\x72\x3f\x48\x0b\xcf\xe1\xc5\x97\x21\xe9\x35\x69\xe1\x36\x78\x12\x50\x39\x8c\xd2\x68\xe8\xbb\x78\x7d\xd1\xbb\x16\x56\x28\xb1\xc0\xfe\x31\x0f\x81\x6c\xab\x8d\x88\x53\x00\x39\x05\x72\x25\xa4\x06\x91\x24\xa6\xd0\x9e\x0c\x5f\x35\xf4\xaa\xa4\xfc\x7e\xe4\x2b\x79\x3c\x2f\x89\x24\x41\xe7\xaa\x74\xcf\x5e\x23\x75\xc4\x9a\xb8\x41\x6a\x27\x53\x6c\x78\x85\xb2\x83\xe1\xd4\x1c\x29\x68\x9c\xac\x04\xae\x8d\xa3\x4d\x16\x08\x1b\x4b\xc3\x87\x93\x3a\xe1\xe9\x3b\x45\xb2\xb6\x03\xa3\x41\x80\x32\x3c\xf2\x73\x8c\x83\xb0\x4b\x37\x0c\xf9\x9e\xb6\xa5\x9c\xa3\xcd\x66\xd8\x06\x72\x13\xaa\xdc\xe6\xef\xb4\x03\x1a\xf0\x56\x3a\xcf\x5d\x25\x69\x29\x1d\x04\x24\x4b\xbd\x1c\x40\x6e\x72\xce\xd3\xdf\x2a\x67\x31\x59\x5f\xce\x7e\x9d\x5d\xd6\xc5\xff\xf1\x4e\xac\xfa\xfe\xa7\xf5\x58\x04\x96\x66\x0e\x8f\xe9\xd3\x03\x8d\xfc\x01\x40\x4d\xee\x01\x14\xc9\xdf\xd6\xc6\xf7\x8d\xe3\x28\xe1\xfc\xd6\x31\x4b\x0c\x33\x4d\x53\x01\x57\x28\xef\x76\x72\xf7\x6e\x72\x30\x79\x55\x21\x48\x29\x4e\x3b\x94\xd8\x77\xbb\xed\xd6\xc2\xb6\xe9\xde\xe2\xf3\xac\x61\xe3\x0d\xb7\x5c\x81\xa8\x91\x1a\x78\xbd\xea\xdd\x44\xa8\x06\xac\xbb\x29\x3c\xc1\x81\xea\xf7\x36\xf9\x2d\x85\xfb\xe0\xd8\xeb\x31\xfd\x2d\xe4\xf2\x4c\xfb\x5e\xb5\x78\xa6\xe1\x39\x54\x2f\x94\xd4\xe1\x79\x2b\x8a\x0e\x64\xc7\x4e\x8a\x0a\x3d\xc2\x56\xc4\x09\xec\x7c\x22\x41\xc1\x1c\x6c\x34\x8b\x7e\xbf\x38\x8f\xa3\x34\x32\xd8\x13\x8b\x7e\x88\xbf\x17\x42\xb9\xde\xb8\x6e\x16\xc2\x09\xbc\xe1\xf2\x36\xa9\x0b\x5c\x55\x01\x89\xa7\xd5\x7e\x44\x81\x81\x2d\x5a\xa3\x62\x4b\x17\xa1\x6a\xa5\xf8\xa0\x84\x28\x22\xa6\x8d\xda\x97\x11\x98\x87\xfa\xcf\x4e\x93\x00\x9e\xd6\x0d\x41\x26\xa4\x2a\x2c\x3e\x3d\x81\x03\x69\xc7\x15\x36\x13\x09\xfb\xd2\x21\xf0\xc4\xea\xc0\x99\x35\xae\xcc\x26\x28\x70\x28\x79\xed\x83\xa3\xc6\xc1\x4e\xf9\xe0\xab\x17\xe1\xa0\x70\x62\x89\x0d\x70\xd4\x06\xaf\x1c\x75\x70\x8c\xfe\xd3\xd0\x79\x56\xbf\x3e\x02\x45\x77\xff\x19\x78\xec\xf8\x79\xaf\xcf\xa9\x88\xb8\xdb\x69\xbc\x54\xca\x86\x66\xe4\xaf\xe5\xf8\x47\x47\xd8\x2e\x6d\x38\x5a\x9b\x38\x1c\x70\xdb\xd7\x7c\xdb\xfd\xf5\xea\x7d\x9e\xbf\xaf\x65\x22\x8c\xea\xdf\x30\xf1\x5b\x9c\x72\x97\x43\x6f\xb9\xc5\x1b\x69\x0a\x2a\x60\xf8\xbf\x34\x12\xd6\x2d\xdf\x5d\xb7\x73\x17\xef\xc6\xd8\x6f\xcd\xcb\xb1\xcd\x2a\xde\xed\x86\x6e\xa9\x51\x3e\x0c\xd7\xd6\x78\x65\x96\x85\x5b\xd7\x0e\xf3\x3f\x70\x49\x16\x03\xdd\x9b\x9c\xda\x81\x58\x9d\x94\x45\x91\x96\x75\x41\x1c\x84\x46\x04\x56\x42\xa7\x71\x18\x11\x69\x2a\x49\x1e\x83\x90\x34\x14\x4b\x21\x75\xf7\xa0\x19\xbf\x59\x85\x0f\x21\x63\xaf\xb7\x6d\x16\xd2\x38\x44\xd2\xc4\xc7\x1a\x77\x1f\x51\x30\x77\x82\x68\xf7\xbe\x2f\x5e\x19\x1a\xed\x8a\x35\x77\xc2\x20\x6e\x84\x54\x82\xa6\x2f\xee\xb0\x74\x0a\x89\x42\xa1\xc3\x2d\x3f\x66\xde\xdc\xa0\x75\xdd\x47\x80\xfc\xcf\x60\x7c\x27\x2b\x56\xaf\xd1\x1c\x8f\x8f\xd9\xc7\x46\x6c\x38\xfe\x1b\x25\xbc\x8f\xf0\x6a\x98\x37\x44\x96\xf4\xfc\x03\x08\xb5\xef\x3e\x2e\xa4\xb8\x67\x22\x9a\x9f\x60\xdc\xe8\xcb\xff\x2a\x41\xb6\x0f\xb1\xf3\xba\x3f\x8b\x87\xf7\xc6\x0c\x40\xa1\xe0\x29\xa9\xfa\x3d\x53\xf5\xa3\x0f\x0d\x6d\x55\xf4\x86\x8e\x6e\x2f\x7c\xf9\x5e\x6b\x85\xd5\x0d\x48\x68\xed\x17\x88\x1a\xa4\x47\x2b\x68\x1e\x22\x74\xc5\x3f\x0a\xa4\xa5\x63\x71\xec\x17\x49\x41\x17\x05\xc7\xeb\x7d\x2a\xcc\x52\x2f\x87\xdd\x4e\xf8\xde\x88\xf7\xc4\xdf\x6e\xe3\x3d\x54\x40\xe6\x8c\x77\x02\xf5\x95\x40\
xe2\x6f\xb9\x5b\xe4\xb1\x79\xe7\x5e\x80\xd6\xe8\x53\x98\xa9\x77\x6e\x01\x98\x31\xde\x04\xec\x5e\x36\xd2\x1a\x7f\x6b\x01\x9c\x49\x97\xc2\x05\x31\x3b\x21\xe1\x6f\xf7\x23\xa2\x62\xa0\x60\x38\x3e\xcc\x40\x4b\x07\x98\x76\x6e\x26\x88\x98\x3f\x85\xd5\x50\xcf\x8f\x9b\xab\xe1\x53\x3c\xa8\x5c\x37\x6c\x23\xd7\x6c\x9b\xbb\x93\xc3\x49\x6e\x5c\xe1\xf1\x70\x32\x23\x9b\xd7\x80\xbd\x87\xb5\x39\x6b\xec\x93\x3c\x94\x2a\x59\x7a\x95\xd9\xee\x61\x65\xe9\x8d\x96\xc3\xdf\x3e\x5e\x64\x4d\xdc\x54\xb1\x45\xd3\x12\xc2\xb7\x8d\x7b\xcb\x87\x26\x2d\x1a\x54\x22\x61\xd5\x5c\x4d\x26\x4f\xc7\xb7\xf5\xcf\x81\x98\xab\x5a\x34\x95\x12\x21\x32\xc2\x79\x39\x2a\xe4\x3f\x30\x6e\xdb\x8c\xc1\x6a\x09\x2c\x86\x9f\x18\xdc\xcd\x52\x08\x9a\x05\x37\x10\x85\xa3\x51\x74\x1b\x5b\x29\x3a\x69\x31\x85\x4c\xa2\x4a\xc1\xa4\x68\x79\xd0\xfd\xcd\x19\x1d\x7e\x57\xa1\x95\x24\x31\xfc\x96\x0b\x7f\xc8\xf9\x67\xa1\x96\x09\xfa\x12\x32\x14\xfc\xdf\xc9\x1b\xc8\x85\x73\xb0\x46\x41\xa3\x6d\x56\x28\x55\x82\xb1\x29\x92\xf0\x7a\xd6\xa3\xb0\x36\x50\x38\xb4\x0e\x36\x2b\x13\x4b\x2d\xb7\x78\x39\x75\xab\xd2\x0f\xe2\x75\x8e\x74\xb9\x12\x25\x48\x4f\x65\x3d\x1e\xaa\x19\xe9\xf5\xcf\x1e\xfe\x63\x64\xc8\xc0\xfb\x61\x5e\x4d\x85\xed\x38\xe7\xcf\xf4\xd6\x8e\xf0\x38\x14\xb5\x63\x7b\x7b\xd1\xd5\x0e\xe4\xaa\xf4\xb4\xa3\xb5\x59\xc8\xda\x21\xc9\x2b\xfc\xd6\x0e\xc6\x46\xab\xcd\x0b\x8c\xa0\x9a\x81\xdf\x76\xc2\x93\xb5\x8c\xf1\x19\x7e\x6d\xd6\xe4\xfc\x36\x88\x80\x21\x2f\xf6\xc8\x38\xd7\x58\x52\x36\x0f\x36\x6a\x94\xa6\xf0\xe1\xf3\x35\x96\x5f\x0e\x57\xa2\x08\xc7\x06\x5d\x5d\x7a\xaa\xb0\x08\x6b\x0f\x24\x83\x5a\x0b\x39\x19\x9f\x80\xfc\xb1\xc9\x50\x55\x4f\x90\xcf\x9e\x55\x7b\x36\xd7\x3f\xcb\x2f\x55\x84\xd7\x88\xdf\x59\xef\xb7\x34\x8a\x31\x12\x68\x28\x28\xba\x77\xdd\x7f\x06\x00\x00\xff\xff\x5a\x43\x33\xde\x00\x22\x00\x00") func call_tracerJsBytes() ([]byte, error) { return bindataRead( @@ -133,11 +133,11 @@ func call_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "call_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe9, 0xef, 0x68, 0xda, 0xd8, 0x9, 0xf5, 0xd5, 0x71, 0xa8, 0x8a, 0xfb, 0x30, 0xe8, 0xf0, 0x72, 0x14, 0x36, 0x6b, 0x62, 0x5a, 0x4e, 0xff, 0x16, 0xdc, 0xd3, 0x2c, 0x68, 0x7b, 0x79, 0x9f, 0xd3}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4d, 0x39, 0xde, 0xc6, 0x79, 0xff, 0xe3, 0x5d, 0x47, 0xed, 0xbd, 0xf4, 0x21, 0xe8, 0xc9, 0x4, 0xe0, 0xe0, 0xe4, 0x76, 0x88, 0x25, 0x7f, 0x4f, 0x30, 0xfe, 0x30, 0x1f, 0x8c, 0x4d, 0x76, 0x3d}} return a, nil } -var _evmdis_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x57\xdf\x6f\xda\xca\x12\x7e\x86\xbf\x62\x94\x27\x50\x29\x60\x63\x08\x38\x27\x47\xe2\xa6\xf4\x1c\xae\xd2\x24\x02\x72\x8f\x2a\x94\x87\x05\xc6\xb0\xaa\xf1\x5a\xbb\x6b\x72\xb8\x55\xfe\xf7\xab\xd9\x59\x03\xf9\x75\xdb\x4a\xa7\x0f\x3b\xb5\x77\xbe\x6f\xbe\x9d\x19\xcf\x92\x56\x0b\xae\x54\xbe\xd7\x72\xbd\xb1\x10\xb6\x83\x73\x98\x6d\x10\xd6\xea\x23\xda\x0d\x6a\x2c\xb6\x30\x2c\xec\x46\x69\x53\x6d\xb5\x60\xb6\x91\x06\x12\x99\x22\x48\x03\xb9\xd0\x16\x54\x02\xf6\x85\x7f\x2a\x17\x5a\xe8\x7d\xb3\xda\x6a\x31\xe6\xcd\x6d\x62\x48\x34\x22\x18\x95\xd8\x47\xa1\x31\x86\xbd\x2a\x60\x29\x32\xd0\xb8\x92\xc6\x6a\xb9\x28\x2c\x82\xb4\x20\xb2\x55\x4b\x69\xd8\xaa\x95\x4c\xf6\x44\x29\x2d\x14\xd9\x0a\xb5\x0b\x6d\x51\x6f\x4d\xa9\xe3\x8f\x9b\x7b\xb8\x46\x63\x50\xc3\x1f\x98\xa1\x16\x29\xdc\x15\x8b\x54\x2e\xe1\x5a\x2e\x31\x33\x08\xc2\x40\x4e\x6f\xcc\x06\x57\xb0\x70\x74\x04\xfc\x4c\x52\xa6\x5e\x0a\x7c\x56\x45\xb6\x12\x56\xaa\xac\x01\x28\x49\x39\xec\x50\x1b\xa9\x32\xe8\x94\xa1\x3c\x61\x03\x94\x26\x92\x9a\xb0\x74\x00\x0d\x2a\x27\x5c\x1d\x44\xb6\x87\x54\xd8\x23\xf4\x27\x12\x72\x3c\xf7\x0a\x64\xe6\xc2\x6c\x54\x8e\x60\x37\xc2\xd2\xa9\x1f\x65\x9a\xc2\x02\xa1\x30\x98\x14\x69\x83\xd8\x16\x85\x85\xbf\xc6\xb3\x3f\x6f\xef\x67\x30\xbc\xf9\x0a\x7f\x0d\x27\x93\xe1\xcd\xec\xeb\x05\x3c\x4a\xbb\x51\x85\x05\xdc\x21\x53\xc9\x6d\x9e\x4a\x5c\xc1\xa3\xd0\x5a\x64\x76\x0f\x2a\x21\x86\x2f\xa3\xc9\xd5\x9f\xc3\x9b\xd9\xf0\x5f\xe3\xeb\xf1\xec\x2b\x28\x0d\x9f\xc7\xb3\x9b\xd1\x74\x0a\x9f\x6f\x27\x30\x84\xbb\xe1\x64\x36\xbe\xba\xbf\x1e\x4e\xe0\xee\x7e\x72\x77\x3b\x1d\x35\x61\x8a\xa4\x0a\x09\xff\xe3\x9c\x27\xae\x7a\x1a\x61\x85\x56\xc8\xd4\x94\x99\xf8\xaa\x0a\x30\x1b\x55\xa4\x2b\xd8\x88\x1d\x82\xc6\x25\xca\x1d\xae\x40\xc0\x52\xe5\xfb\x9f\x2e\x2a\x71\x89\x54\x65\x6b\x77\xe6\x77\x1b\x12\xc6\x09\x64\xca\x36\xc0\x20\xc2\x6f\x1b\x6b\xf3\xb8\xd5\x7a\x7c\x7c\x6c\xae\xb3\xa2\xa9\xf4\xba\x95\x32\x9d\x69\xfd\xde\xac\x12\x27\xee\xb6\x2b\x69\x66\x5a\x2c\x51\x83\x46\x5b\xe8\xcc\x80\x29\x92\x84\xfc\x2c\xc8\x2c\x51\x7a\xeb\xda\x04\x12\xad\xb6\x20\xc0\x92\x2f\x58\x05\x39\x6a\xda\xf4\x14\x1f\x8d\xdd\xa7\x4e\xe6\x4a\x1a\x61\x0c\x6e\x17\xe9\xbe\x59\xfd\x5e\xad\x18\x2b\x96\xdf\x62\x98\x7f\x57\xb9\x89\x61\xfe\xf0\xf4\xd0\xa8\x56\x2b\x59\x5e\x98\x0d\x9a\x18\xbe\xb7\x63\x68\x37\x20\x88\x21\x68\x40\xe8\xd6\x8e\x5b\x23\xb7\x76\xdd\xda\x73\xeb\xb9\x5b\xfb\x6e\x1d\xb8\x35\x68\xb3\x61\x74\xc0\x6e\x01\xfb\x05\xec\x18\xb0\x67\xc8\x9e\xa1\x8f\xc3\x81\x42\x8e\x14\x72\xa8\x90\x63\x85\xcc\xd2\x61\x97\x88\x59\x22\x66\xe9\x32\x4b\x97\x59\xba\xec\xd2\x65\x96\xae\x17\xdc\x75\xe7\xe9\x32\x4b\xf7\x9c\x9f\x98\xa5\xcb\x2c\x3d\x3e\x72\x8f\x01\x3d\x7f\x44\x06\xf4\x58\x7c\x8f\x01\x3d\x06\xf4\x19\xd0\xe7\xb0\xfd\x90\x9f\x3a\x6c\x98\xa5\xcf\x61\xfb\x3d\x36\x1c\xb6\xcf\x2c\x7d\x66\x19\xb0\xf8\x41\xe0\xf6\x06\x1c\x6f\xc0\xf1\x06\x3e\xab\x65\x5a\x7d\x5e\xdb\x3e\xb1\xed\xd0\xdb\x8e\xb7\x91\xb7\x5d\x6f\x7d\xe6\xdb\x3e\xf5\x6d\x9f\xfb\xb6\xe7\x3b\xd4\xc9\xf3\x05\x9e\x2f\xf0\x7c\x81\xe7\x0b\x3c\x5f\x59\xc9\xb2\x94\x65\x2d\x7d\x31\x03\x5f\xcd\xc0\x97\x33\xf0\xf5\x0c\x7c\x41\x03\x5f\xd1\xc0\x97\x34\xf0\x35\x0d\x42\xcf\x17\xf6\x63\x08\xc9\x0e\x62\xe8\x34\x20\xe8\xb4\x63\x88\xc8\x06\x31\x74\xc9\x86\x31\xf4\xc8\x76\x62\x38\x27\x1b\xc5\xd0\x27\xdb\x8d\x61\x40\x96\xf8\xa8\x6b\x3b\x44\x48\x8c\x1d\x52\x48\x94\x1d\x92\x48\x9c\x11\x69\x24\xd2\x88\x44\x12\x6b\x44\x2a\x89\x36\x22\x99\xc4\x1b\x45\xac\x23\xea\xb2\x8e\xa8\xc7\x3a\xa2\x73\xd6\x41\xdd\xe7\x00\x03\xd6\x41\xfd\x47\x3a\xa8\x01\x49\x87\xeb\x40\xd2\xe1\x7a\x90\x74\xb8\x2e\x24\x4a\xea\x43\xa7\xc3\x75\x22\x91\x52\x2f\x3
a\x1d\xae\x1b\x89\xd6\xf5\x23\xf1\xfa\x8e\x0c\x7a\x81\xb7\xa1\xb7\x1d\x6f\x23\x67\xc3\xc8\x7f\x45\x91\xff\x8c\x22\xff\x1d\x45\x1d\xbf\xef\xfd\xdc\x47\xf0\x44\xdf\x79\xab\x05\x1a\x4d\x91\x5a\x1a\xfe\x32\xdb\xa9\x6f\x34\x9e\x37\x98\x81\x48\x53\x37\xc7\x54\xbe\x54\x2b\x34\x3c\x1f\x17\x88\x19\x48\x8b\x5a\xd0\x05\xa1\x76\xa8\xe9\x6e\x2c\x27\x93\xa3\x23\x4c\x22\x33\x91\x96\xc4\x7e\x86\xd2\x60\x92\xd9\xba\x59\xad\xf0\xfb\x18\x92\x22\x5b\xd2\xe8\xaa\xd5\xe1\xbb\xa7\x00\xbb\x91\xa6\xe9\x46\xd2\xbc\xfd\xd0\x54\xb9\xb9\x80\x52\x67\x22\xde\x92\x49\xd4\x62\x69\x0b\x91\x02\xfe\x8d\xcb\xc2\xcd\x42\x95\x80\xc8\xbc\x72\x48\x78\xe0\x57\x1c\xfe\x24\x6a\xaa\xd6\x0d\x58\x2d\x28\x78\x19\xc2\x58\xcc\x4f\x23\xd0\xb5\x81\x3b\xd4\xfb\x92\xcb\x5d\x83\x14\xf2\x3f\x5f\x7c\x38\x24\x6a\xc2\xbd\xc9\x5c\xad\x54\x76\x42\x43\xa2\xc5\x16\xe1\xf2\xf4\x74\xc7\xff\x36\x53\xcc\xd6\x76\x03\x1f\x21\x78\xb8\xa8\x7a\x04\x6a\xad\x34\x5c\x42\xaa\xd6\xcd\x35\xda\x11\x3d\xd6\xea\x17\xd5\x4a\x45\x26\x50\x73\xbb\x4c\x5f\x71\xdc\xf3\x33\xf7\xea\xec\x01\x2e\x19\x4a\x9e\x4f\x80\xa9\x41\x20\x80\xa7\xf9\x84\xb9\xdd\xd4\xea\x70\x79\x2a\xc5\xc7\xf7\x74\x2a\xa7\x4b\x05\x2e\xf9\xa9\xa2\xf2\x18\xe8\x1f\x11\xa8\xbc\x69\xd5\x4d\xb1\x5d\xa0\xae\xd5\x1b\x6e\x7b\x45\x84\x10\xc3\x73\x7e\xde\x2b\xcb\x3c\x7f\x70\xcf\x4f\x24\xc9\xa9\x77\x8a\xa9\xb6\xe5\xc9\x7f\x87\xb6\x8f\xee\xce\x9e\x6b\xdc\xa9\x1c\x2e\xe1\xe0\x38\x7f\x05\xe1\x64\x11\x22\x51\xba\x46\x28\x09\x97\xd0\xbe\x00\x09\xbf\xf1\xd9\xfc\x0d\x36\x67\xb6\xa6\xca\x1f\x2e\x40\x7e\xf8\x50\x77\xa0\x8a\x7f\xcb\x1a\x9b\xe4\xea\x72\xc4\x09\xc9\x11\xbf\xd5\x64\xbd\x69\xd5\xd4\x6a\x99\xad\x6b\x41\xaf\xee\x72\x5f\x79\xa2\xc5\x3c\x4a\xbb\x64\x7f\x97\x12\xef\x54\xf7\x67\x58\x0a\x83\x70\x76\x35\xbc\xbe\x3e\x8b\xe1\xf8\x70\x75\xfb\x69\x74\x16\x1f\x0e\x29\x33\x63\xe9\xe7\x2b\x97\xf8\x24\x6e\xa7\xde\xdc\x89\xb4\xc0\xdb\x84\xeb\x7d\x70\x97\xff\xc5\xd7\xde\xd1\x2b\x6f\x2e\xe0\xfc\x6c\x2d\x8c\x6b\x87\x17\x80\xf6\xbb\x00\xab\xde\xf2\x0f\x9e\xa7\xe1\x39\xc4\x31\xbd\x85\x0a\x4f\x50\x2f\x30\x32\xcb\x0b\x7b\xc0\x6c\x71\xab\xf4\xbe\x69\xe8\x87\x4f\xcd\xe7\xa4\x71\x48\xce\x07\x7f\xee\x17\x14\xc7\x5e\xcf\x8a\x34\x7d\xbe\xc7\x73\xe4\x9d\x4d\x95\x73\x4e\xe6\xbe\x77\x4e\x3e\x02\xd7\x02\xec\xe7\xa3\x2d\x34\x8a\x6f\x17\xc7\x8a\x7e\x1a\x5d\x8f\xfe\x18\xce\x46\xcf\x2a\x3b\x9d\x0d\x67\xe3\x2b\x7e\xf5\xe3\xda\x86\xbf\x54\xdb\xd7\x9d\x70\x3c\x87\x3b\x06\xbc\x6a\xc1\xb7\x5b\xe0\x97\x7b\xe0\x97\x9a\xe0\x58\xd0\x7f\xa2\xa2\xff\xbf\xa4\xff\x74\x4d\x27\xa3\xd9\xfd\xe4\xe6\xa4\x74\xf4\xe7\xca\x4f\x7c\x33\xde\xf5\xed\xba\x05\xaf\xdc\x79\x7c\xf9\x2b\xee\x8d\xc6\x57\x85\x6d\xb8\xd0\x1f\x4a\xd6\x77\xf4\x4e\x67\xb7\x77\xc7\xde\xbb\x1f\x5f\x8d\x0f\x43\xe5\x47\x31\xda\x0d\x68\xbf\xc3\xfa\xef\xfb\x2f\x77\x9f\x46\xd3\x99\x67\x2a\x33\x9b\x2f\x0f\x9f\xe9\x1a\xed\xdd\x55\xed\x64\x06\xca\xa4\x9c\x7f\xd2\xdc\x51\x9a\xcb\xe9\x77\x40\xa7\x98\x1d\xe0\xcf\x6e\x0e\xf8\x08\xed\xbf\xbb\x78\xe4\x3a\x0e\xf7\x97\x05\xf3\x37\x98\x23\x3e\xd6\xf5\xd9\x45\x7a\x3c\xdd\xf3\x3b\x88\xf1\xd5\xca\x53\xf5\xa9\xfa\xbf\x00\x00\x00\xff\xff\x51\x4b\xdc\x7e\x62\x10\x00\x00") +var _evmdis_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x57\xdf\x6f\xda\xca\x12\x7e\x86\xbf\x62\x94\x27\x50\x29\x60\x63\x08\x38\x27\x47\xe2\xa6\xf4\x1c\xae\xd2\x24\x02\x72\x8f\x2a\x94\x87\x05\xc6\xb0\xaa\xf1\x5a\xbb\x6b\x72\xb8\x55\xfe\xf7\xab\xd9\x59\x03\xf9\x75\xdb\x4a\xa7\x0f\x3b\xb5\x77\xbe\x6f\xbe\x9d\x19\xcf\x92\x56\x0b\xae\x54\xbe\xd7\x72\xbd\xb1\x10\xb6\x83\x73\x98\x6d\x10\xd6\xea\x23\xda\x0d\x6a\x2c\xb6\x30\x2c\xec\x46\x69\x53\x6d\xb5\x60\xb6\x91\x06\x12\x99\x22\x48\x03\xb9\xd0\x16\x54\x02\xf6\x85\x7f\x2a\x17\x5a\xe8\x7d\xb3\xda\x6a\x31\xe6\xcd\x6d\x62\x48\x34\x22\x18\x95\xd8\x47\xa1\x31\x86\xbd\x2a\x60\x29\x32\xd0\xb8\x92\xc6\x6a\xb9\x28\x2c\x82\xb4\x20\xb2\x55\x4b\x69\xd8\xaa\x95\x4c\xf6\x44\x29\x2d\x14\xd9\x0a\xb5\x0b\x6d\x51\x6f\x4d\xa9\xe3\x8f\x9b\x7b\xb8\x46\x63\x50\xc3\x1f\x98\xa1\x16\x29\xdc\x15\x8b\x54\x2e\xe1\x5a\x2e\x31\x33\x08\xc2\x40\x4e\x6f\xcc\x06\x57\xb0\x70\x74\x04\xfc\x4c\x52\xa6\x5e\x0a\x7c\x56\x45\xb6\x12\x56\xaa\xac\x01\x28\x49\x39\xec\x50\x1b\xa9\x32\xe8\x94\xa1\x3c\x61\x03\x94\x26\x92\x9a\xb0\x74\x00\x0d\x2a\x27\x5c\x1d\x44\xb6\x87\x54\xd8\x23\xf4\x27\x12\x72\x3c\xf7\x0a\x64\xe6\xc2\x6c\x54\x8e\x60\x37\xc2\xd2\xa9\x1f\x65\x9a\xc2\x02\xa1\x30\x98\x14\x69\x83\xd8\x16\x85\x85\xbf\xc6\xb3\x3f\x6f\xef\x67\x30\xbc\xf9\x0a\x7f\x0d\x27\x93\xe1\xcd\xec\xeb\x05\x3c\x4a\xbb\x51\x85\x05\xdc\x21\x53\xc9\x6d\x9e\x4a\x5c\xc1\xa3\xd0\x5a\x64\x76\x0f\x2a\x21\x86\x2f\xa3\xc9\xd5\x9f\xc3\x9b\xd9\xf0\x5f\xe3\xeb\xf1\xec\x2b\x28\x0d\x9f\xc7\xb3\x9b\xd1\x74\x0a\x9f\x6f\x27\x30\x84\xbb\xe1\x64\x36\xbe\xba\xbf\x1e\x4e\xe0\xee\x7e\x72\x77\x3b\x1d\x35\x61\x8a\xa4\x0a\x09\xff\xe3\x9c\x27\xae\x7a\x1a\x61\x85\x56\xc8\xd4\x94\x99\xf8\xaa\x0a\x30\x1b\x55\xa4\x2b\xd8\x88\x1d\x82\xc6\x25\xca\x1d\xae\x40\xc0\x52\xe5\xfb\x9f\x2e\x2a\x71\x89\x54\x65\x6b\x77\xe6\x77\x1b\x12\xc6\x09\x64\xca\x36\xc0\x20\xc2\x6f\x1b\x6b\xf3\xb8\xd5\x7a\x7c\x7c\x6c\xae\xb3\xa2\xa9\xf4\xba\x95\x32\x9d\x69\xfd\xde\xac\x12\x27\xee\xb6\x2b\x69\x66\x5a\x2c\x51\x83\x46\x5b\xe8\xcc\x80\x29\x92\x44\x2e\x25\x66\x16\x64\x96\x28\xbd\x75\x7d\x02\x89\x56\x5b\x10\x60\xc9\x19\xac\x82\x1c\x35\x6d\x7a\x8e\x8f\xc6\xee\x53\xa7\x73\x25\x8d\x30\x06\xb7\x8b\x74\xdf\xac\x7e\xaf\x56\x8c\x15\xcb\x6f\x31\xcc\xbf\xab\xdc\xc4\x30\x7f\x78\x7a\x68\x54\xab\x95\x2c\x2f\xcc\x06\x4d\x0c\xdf\xdb\x31\xb4\x1b\x10\xc4\x10\x34\x20\x74\x6b\xc7\xad\x91\x5b\xbb\x6e\xed\xb9\xf5\xdc\xad\x7d\xb7\x0e\xdc\x1a\xb4\xd9\x30\x3a\x60\xb7\x80\xfd\x02\x76\x0c\xd8\x33\x64\xcf\xd0\xc7\xe1\x40\x21\x47\x0a\x39\x54\xc8\xb1\x42\x66\xe9\xb0\x4b\xc4\x2c\x11\xb3\x74\x99\xa5\xcb\x2c\x5d\x76\xe9\x32\x4b\xd7\x0b\xee\xba\xf3\x74\x99\xa5\x7b\xce\x4f\xcc\xd2\x65\x96\x1e\x1f\xb9\xc7\x80\x9e\x3f\x22\x03\x7a\x2c\xbe\xc7\x80\x1e\x03\xfa\x0c\xe8\x73\xd8\x7e\xc8\x4f\x1d\x36\xcc\xd2\xe7\xb0\xfd\x1e\x1b\x0e\xdb\x67\x96\x3e\xb3\x0c\x58\xfc\x20\x70\x7b\x03\x8e\x37\xe0\x78\x03\x9f\xd5\x32\xad\x3e\xaf\x6d\x9f\xd8\x76\xe8\x6d\xc7\xdb\xc8\xdb\xae\xb7\x3e\xf3\x6d\x9f\xfa\xb6\xcf\x7d\xdb\xf3\x1d\xea\xe4\xf9\x02\xcf\x17\x78\xbe\xc0\xf3\x05\x9e\xaf\xac\x64\x59\xca\xb2\x96\xbe\x98\x81\xaf\x66\xe0\xcb\x19\xf8\x7a\x06\xbe\xa0\x81\xaf\x68\xe0\x4b\x1a\xf8\x9a\x06\xa1\xe7\x0b\xfb\x31\x84\x64\x07\x31\x74\x1a\x10\x74\xda\x31\x44\x64\x83\x18\xba\x64\xc3\x18\x7a\x64\x3b\x31\x9c\x93\x8d\x62\xe8\x93\xed\xc6\x30\x20\x4b\x7c\xd4\xb5\x1d\x22\x24\xc6\x0e\x29\x24\xca\x0e\x49\x24\xce\x88\x34\x12\x69\x44\x22\x89\x35\x22\x95\x44\x1b\x91\x4c\xe2\x8d\x22\xd6\x11\x75\x59\x47\xd4\x63\x1d\xd1\x39\xeb\xa0\xee\x73\x80\x01\xeb\xa0\xfe\x23\x1d\xd4\x80\xa4\xc3\x75\x20\xe9\x70\x3d\x48\x3a\x5c\x17\x12\x25\xf5\xa1\xd3\xe1\x3a\x91\x48\xa
9\x17\x9d\x0e\xd7\x8d\x44\xeb\xfa\x91\x78\x7d\x47\x06\xbd\xc0\xdb\xd0\xdb\x8e\xb7\x91\xb3\x61\xe4\xbf\xa2\xc8\x7f\x46\x91\xff\x8e\xa2\x8e\xdf\xf7\x7e\xee\x23\x78\xa2\xef\xbc\xd5\x02\x8d\xa6\x48\x2d\x4d\x7f\x99\xed\xd4\x37\x9a\xcf\x1b\xcc\x40\xa4\xa9\x1b\x64\x2a\x5f\xaa\x15\x1a\x1e\x90\x0b\xc4\x0c\xa4\x45\x2d\xe8\x86\x50\x3b\xd4\x74\x39\x96\xa3\xc9\xd1\x11\x26\x91\x99\x48\x4b\x62\x3f\x44\x69\x30\xc9\x6c\xdd\xac\x56\xf8\x7d\x0c\x49\x91\x2d\x69\x74\xd5\xea\xf0\xdd\x53\x80\xdd\x48\xd3\x74\x23\x69\xde\x7e\x68\xaa\xdc\x5c\x40\xa9\x33\x11\x6f\xc9\x24\x6a\xb1\xb4\x85\x48\x01\xff\xc6\x65\xe1\x66\xa1\x4a\x40\x64\x5e\x39\x24\x3c\xf1\x2b\x0e\x7f\x12\x35\x55\xeb\x06\xac\x16\x14\xbc\x0c\x61\x2c\xe6\xa7\x11\xe8\xde\xc0\x1d\xea\x7d\xc9\xe5\xee\x41\x0a\xf9\x9f\x2f\x3e\x1c\x12\x35\xe1\xde\x64\xae\x56\x2a\x3b\xa1\x21\xd1\x62\x8b\x70\x79\x7a\xba\xe3\x7f\x9b\x29\x66\x6b\xbb\x81\x8f\x10\x3c\x5c\x54\x3d\x02\xb5\x56\x1a\x2e\x21\x55\xeb\xe6\x1a\xed\x88\x1e\x6b\xf5\x8b\x6a\xa5\x22\x13\xa8\xb9\x5d\xa6\xaf\x38\xee\xf9\x99\x7b\x75\xf6\x00\x97\x0c\x25\xcf\x27\xc0\xd4\x20\x10\xc0\xd3\x7c\xc2\xdc\x6e\x6a\x75\xb8\x3c\x95\xe2\xe3\x7b\x3a\x95\xd3\xa5\x02\x97\xfc\x54\x51\x79\x0c\xf4\x8f\x08\x54\xde\xb4\xea\xa6\xd8\x2e\x50\xd7\xea\x0d\xb7\xbd\x22\x42\x88\xe1\x39\x3f\xef\x95\x65\x9e\x3f\xb8\xe7\x27\x92\xe4\xd4\x3b\xc5\x54\xdb\xf2\xe4\xbf\x43\xdb\x47\x77\x67\xcf\x35\xee\x54\x0e\x97\x70\x70\x9c\xbf\x82\x70\xb2\x08\x91\x28\x5d\x23\x94\x84\x4b\x68\x5f\x80\x84\xdf\xf8\x6c\xfe\x06\x9b\x33\x5b\x53\xe5\x0f\x17\x20\x3f\x7c\xa8\x3b\x50\xc5\xbf\x65\x8d\x4d\x72\x75\x39\xe2\x84\xe4\x88\xdf\x6a\xb2\xde\xb4\x6a\x6a\xb5\xcc\xd6\xb5\xa0\x57\x77\xb9\xaf\x3c\xd1\x62\x1e\xa5\x5d\xb2\xbf\x4b\x89\x77\xaa\xfb\x33\x2c\x85\x41\x38\xbb\x1a\x5e\x5f\x9f\xc5\x70\x7c\xb8\xba\xfd\x34\x3a\x8b\x0f\x87\x94\x99\xb1\xf4\xfb\x95\x4b\x7c\x12\xb7\x53\x6f\xee\x44\x5a\xe0\x6d\xc2\xf5\x3e\xb8\xcb\xff\xe2\x6b\xef\xe8\x95\x37\x17\x70\x7e\xb6\x16\xc6\xb5\xc3\x0b\x40\xfb\x5d\x80\x55\x6f\xf9\x07\xcf\xd3\xf0\x1c\xe2\x98\xde\x42\x85\x27\xa8\x17\x18\x99\xe5\x85\x3d\x60\xb6\xb8\x55\x7a\xdf\x34\xf4\xcb\xa7\xe6\x73\xd2\x38\x24\xe7\x83\x3f\xf7\x0b\x8a\x63\xaf\x67\x45\x9a\x3e\xdf\xe3\x39\xf2\xce\xa6\xca\x39\x27\x73\xdf\x3b\x27\x1f\x81\x6b\x01\xf6\xf3\xd1\x16\x1a\xc5\xb7\x8b\x63\x45\x3f\x8d\xae\x47\x7f\x0c\x67\xa3\x67\x95\x9d\xce\x86\xb3\xf1\x15\xbf\xfa\x71\x6d\xc3\x5f\xaa\xed\xeb\x4e\x38\x9e\xc3\x1d\x03\x5e\xb5\xe0\xdb\x2d\xf0\xcb\x3d\xf0\x4b\x4d\x70\x2c\xe8\x3f\x51\xd1\xff\x5f\xd2\x7f\xba\xa6\x93\xd1\xec\x7e\x72\x73\x52\x3a\xfa\x7b\xe5\x27\xbe\x19\xef\xfa\x76\xdd\x82\x57\xee\x3c\xbe\xfc\x15\xf7\x46\xe3\xab\xc2\x36\x5c\xe8\x0f\x25\xeb\x3b\x7a\xa7\xb3\xdb\xbb\x63\xef\xdd\x8f\xaf\xc6\x87\xa1\xf2\xa3\x18\xed\x06\xb4\xdf\x61\xfd\xf7\xfd\x97\xbb\x4f\xa3\xe9\xcc\x33\x95\x99\xcd\x97\x87\xcf\x74\x8d\xf6\xee\xaa\x76\x32\x03\x65\x52\xce\x3f\x69\xee\x28\xcd\xe5\xf4\x3b\xa0\x53\xcc\x0e\xf0\x67\x37\x07\x7c\x84\xf6\xdf\x5d\x3c\x72\x1d\x87\xfb\xcb\x82\xf9\x1b\xcc\x11\x1f\xeb\xfa\xec\x22\x3d\x9e\xee\xf9\x1d\xc4\xf8\x6a\xe5\xa9\xfa\x54\xfd\x5f\x00\x00\x00\xff\xff\xdf\x2f\xd9\xfa\x63\x10\x00\x00") func evmdis_tracerJsBytes() ([]byte, error) { return bindataRead( @@ -153,7 +153,7 @@ func evmdis_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "evmdis_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd5, 0xe8, 0x96, 0xa1, 0x8b, 0xc, 0x68, 0x3c, 0xe8, 0x5d, 0x7e, 0xf0, 0xab, 0xfe, 0xec, 0xd1, 0xb, 0x3d, 0xfc, 0xc7, 0xac, 0xb5, 0xa, 0x41, 0x55, 0x0, 0x3a, 0x60, 0xa7, 0x8e, 0x46, 0x93}} + a := &asset{bytes: bytes, 
info: info, digest: [32]uint8{0xb5, 0xc8, 0x73, 0x8e, 0xfb, 0x1f, 0x84, 0x7d, 0x37, 0xd9, 0x26, 0x24, 0x37, 0xb8, 0x65, 0xb1, 0xed, 0xa0, 0x76, 0x9a, 0xf0, 0x8e, 0x3a, 0x9b, 0x20, 0x93, 0x27, 0x26, 0x2e, 0xc9, 0x9b, 0xde}}
 	return a, nil
 }
diff --git a/eth/tracers/internal/tracers/call_tracer.js b/eth/tracers/internal/tracers/call_tracer.js
index f8b383cd96..352c309b49 100644
--- a/eth/tracers/internal/tracers/call_tracer.js
+++ b/eth/tracers/internal/tracers/call_tracer.js
@@ -132,13 +132,12 @@
 		// If the call was a contract call, retrieve the gas usage and output
 		if (call.gas !== undefined) {
 			call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost + call.gas - log.getGas()).toString(16);
-
-			var ret = log.stack.peek(0);
-			if (!ret.equals(0)) {
-				call.output = toHex(log.memory.slice(call.outOff, call.outOff + call.outLen));
-			} else if (call.error === undefined) {
-				call.error = "internal failure"; // TODO(karalabe): surface these faults somehow
-			}
+		}
+		var ret = log.stack.peek(0);
+		if (!ret.equals(0)) {
+			call.output = toHex(log.memory.slice(call.outOff, call.outOff + call.outLen));
+		} else if (call.error === undefined) {
+			call.error = "internal failure"; // TODO(karalabe): surface these faults somehow
 		}
 		delete call.gasIn; delete call.gasCost;
 		delete call.outOff; delete call.outLen;
@@ -208,7 +207,7 @@
 		} else if (ctx.error !== undefined) {
 			result.error = ctx.error;
 		}
-		if (result.error !== undefined) {
+		if (result.error !== undefined && (result.error !== "execution reverted" || result.output ==="0x")) {
 			delete result.output;
 		}
 		return this.finalize(result);
diff --git a/eth/tracers/testdata/call_tracer_inner_instafail.json b/eth/tracers/testdata/call_tracer_inner_instafail.json
new file mode 100644
index 0000000000..86070d1308
--- /dev/null
+++ b/eth/tracers/testdata/call_tracer_inner_instafail.json
@@ -0,0 +1,72 @@
+{
+  "genesis": {
+    "difficulty": "117067574",
+    "extraData": "0xd783010502846765746887676f312e372e33856c696e7578",
+    "gasLimit": "4712380",
+    "hash": "0xe05db05eeb3f288041ecb10a787df121c0ed69499355716e17c307de313a4486",
+    "miner": "0x0c062b329265c965deef1eede55183b3acb8f611",
+    "mixHash": "0xb669ae39118a53d2c65fd3b1e1d3850dd3f8c6842030698ed846a2762d68b61d",
+    "nonce": "0x2b469722b8e28c45",
+    "number": "24973",
+    "stateRoot": "0x532a5c3f75453a696428db078e32ae283c85cb97e4d8560dbdf022adac6df369",
+    "timestamp": "1479891145",
+    "totalDifficulty": "1892250259406",
+    "alloc": {
+      "0x6c06b16512b332e6cd8293a2974872674716ce18": {
+        "balance": "0x0",
+        "nonce": "1",
+        "code": "0x60606040526000357c0100000000000000000000000000000000000000000000000000000000900480632e1a7d4d146036575b6000565b34600057604e60048080359060200190919050506050565b005b3373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051809050600060405180830381858888f19350505050505b5056",
+        "storage": {}
+      },
+      "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31": {
+        "balance": "0x229ebbb36c3e0f20",
+        "nonce": "3",
+        "code": "0x",
+        "storage": {}
+      }
+    },
+    "config": {
+      "chainId": 3,
+      "homesteadBlock": 0,
+      "daoForkSupport": true,
+      "eip150Block": 0,
+      "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
+      "eip155Block": 10,
+      "eip158Block": 10,
+      "byzantiumBlock": 1700000,
+      "constantinopleBlock": 4230000,
+      "petersburgBlock": 4939394,
+      "istanbulBlock": 6485846,
+      "muirGlacierBlock": 7117117,
+      "ethash": {}
+    }
+  },
+  "context": {
+    "number": "24974",
+    "difficulty": "117067574",
+    "timestamp": "1479891162",
+    "gasLimit": "4712388",
+    "miner": 
"0xc822ef32e6d26e170b70cf761e204c1806265914" + }, + "input": "0xf889038504a81557008301f97e946c06b16512b332e6cd8293a2974872674716ce1880a42e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b1600002aa0e2a6558040c5d72bc59f2fb62a38993a314c849cd22fb393018d2c5af3112095a01bdb6d7ba32263ccc2ecc880d38c49d9f0c5a72d8b7908e3122b31356d349745", + "result": { + "type": "CALL", + "from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31", + "to": "0x6c06b16512b332e6cd8293a2974872674716ce18", + "value": "0x0", + "gas": "0x1a466", + "gasUsed": "0x1dc6", + "input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000", + "output": "0x", + "calls": [ + { + "type": "CALL", + "from": "0x6c06b16512b332e6cd8293a2974872674716ce18", + "to": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31", + "value": "0x14d1120d7b160000", + "error":"internal failure", + "input": "0x" + } + ] + } +} diff --git a/eth/tracers/testdata/call_tracer_revert_reason.json b/eth/tracers/testdata/call_tracer_revert_reason.json new file mode 100644 index 0000000000..094b044677 --- /dev/null +++ b/eth/tracers/testdata/call_tracer_revert_reason.json @@ -0,0 +1,64 @@ +{ + "context": { + "difficulty": "2", + "gasLimit": "8000000", + "miner": "0x0000000000000000000000000000000000000000", + "number": "3212651", + "timestamp": "1597246515" + }, + "genesis": { + "alloc": { + "0xf58833cf0c791881b494eb79d461e08a1f043f52": { + "balance": "0x0", + "code": "0x608060405234801561001057600080fd5b50600436106100a5576000357c010000000000000000000000000000000000000000000000000000000090048063609ff1bd11610078578063609ff1bd146101af5780639e7b8d61146101cd578063a3ec138d14610211578063e2ba53f0146102ae576100a5565b80630121b93f146100aa578063013cf08b146100d85780632e4176cf146101215780635c19a95c1461016b575b600080fd5b6100d6600480360360208110156100c057600080fd5b81019080803590602001909291905050506102cc565b005b610104600480360360208110156100ee57600080fd5b8101908080359060200190929190505050610469565b604051808381526020018281526020019250505060405180910390f35b61012961049a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6101ad6004803603602081101561018157600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506104bf565b005b6101b76108db565b6040518082815260200191505060405180910390f35b61020f600480360360208110156101e357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610952565b005b6102536004803603602081101561022757600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610b53565b60405180858152602001841515151581526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200194505050505060405180910390f35b6102b6610bb0565b6040518082815260200191505060405180910390f35b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020905060008160000154141561038a576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f486173206e6f20726967687420746f20766f746500000000000000000000000081525060200191505060405180910390fd5b8060010160009054906101000a900460ff161561040f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600e8152602001807f416c726561647920766f7465642e00000000000000000000000000000000000081525060200191505060405180910390fd5b60018160010160006101000a81548160ff0
2191690831515021790555081816002018190555080600001546002838154811061044757fe5b9060005260206000209060020201600101600082825401925050819055505050565b6002818154811061047657fe5b90600052602060002090600202016000915090508060000154908060010154905082565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff1615610587576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f596f7520616c726561647920766f7465642e000000000000000000000000000081525060200191505060405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415610629576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601e8152602001807f53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e000081525060200191505060405180910390fd5b5b600073ffffffffffffffffffffffffffffffffffffffff16600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16146107cc57600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1691503373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614156107c7576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260198152602001807f466f756e64206c6f6f7020696e2064656c65676174696f6e2e0000000000000081525060200191505060405180910390fd5b61062a565b60018160010160006101000a81548160ff021916908315150217905550818160010160016101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff16156108bf578160000154600282600201548154811061089c57fe5b9060005260206000209060020201600101600082825401925050819055506108d6565b816000015481600001600082825401925050819055505b505050565b6000806000905060008090505b60028054905081101561094d57816002828154811061090357fe5b9060005260206000209060020201600101541115610940576002818154811061092857fe5b90600052602060002090600202016001015491508092505b80806001019150506108e8565b505090565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146109f7576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526028815260200180610bde6028913960400191505060405180910390fd5b600160008273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160009054906101000a900460ff1615610aba576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260188152602001807f54686520766f74657220616c726561647920766f7465642e000000000000000081525060200191505060405180910390fd5b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000015414610b0957600080fd5b60018060008373ffff
ffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000018190555050565b60016020528060005260406000206000915090508060000154908060010160009054906101000a900460ff16908060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff16908060020154905084565b60006002610bbc6108db565b81548110610bc657fe5b90600052602060002090600202016000015490509056fe4f6e6c79206368616972706572736f6e2063616e206769766520726967687420746f20766f74652ea26469706673582212201d282819f8f06fed792100d60a8b08809b081a34a1ecd225e83a4b41122165ed64736f6c63430006060033", + "nonce": "1", + "storage": { + "0x6200beec95762de01ce05f2a0e58ce3299dbb53c68c9f3254a242121223cdf58": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1": { + "balance": "0x57af9d6b3df812900", + "code": "0x", + "nonce": "6", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "IstanbulBlock":1561651, + "chainId": 5, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf888068449504f80832dc6c094f58833cf0c791881b494eb79d461e08a1f043f5280a45c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf12da0264664db3e71fae1dbdaf2f53954be149ad3b7ba8a5054b4d89c70febfacc8b1a0212e8398757963f419681839ae8c5a54b411e252473c82d93dda68405ca63294", + "result": { + "error": "execution reverted", + "from": "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", + "gas": "0x2d7308", + "gasUsed": "0x588", + "input": "0x5c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", + "to": "0xf58833cf0c791881b494eb79d461e08a1f043f52", + "type": "CALL", + "value": "0x0", + "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e0000" + } +} diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index b69c4fb234..e38f4cdc4d 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -39,7 +39,6 @@ import ( "github.com/ethereum/go-ethereum/params/types/genesisT" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/tests" - "github.com/go-test/deep" ) // To generate a new callTracer test, copy paste the makeTest method below into @@ -271,14 +270,31 @@ func TestCallTracer(t *testing.T) { t.Fatalf("failed to unmarshal trace result: %v", err) } - if !reflect.DeepEqual(ret, test.Result) { - diffs := deep.Equal(ret, test.Result) - t.Log(len(diffs), "diffs") - for _, d := range diffs { - t.Log(d) - } - t.Fatalf("trace mismatch: \nhave %+v\nwant %+v\nconfig: %v", ret, test.Result, test.Genesis.Config) + if !jsonEqual(ret, test.Result) { + // uncomment this for easier 
debugging
+			//have, _ := json.MarshalIndent(ret, "", " ")
+			//want, _ := json.MarshalIndent(test.Result, "", " ")
+			//t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", string(have), string(want))
+			t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result)
 			}
 		})
 	}
 }
+
+// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to
+// comparison
+func jsonEqual(x, y interface{}) bool {
+	xTrace := new(callTrace)
+	yTrace := new(callTrace)
+	if xj, err := json.Marshal(x); err == nil {
+		json.Unmarshal(xj, xTrace)
+	} else {
+		return false
+	}
+	if yj, err := json.Marshal(y); err == nil {
+		json.Unmarshal(yj, yTrace)
+	} else {
+		return false
+	}
+	return reflect.DeepEqual(xTrace, yTrace)
+}
diff --git a/go.mod b/go.mod
old mode 100644
new mode 100755
index eda425fd52..92ddfbb6e5
--- a/go.mod
+++ b/go.mod
@@ -26,9 +26,8 @@ require (
 	github.com/go-sourcemap/sourcemap v2.1.2+incompatible // indirect
 	github.com/go-stack/stack v1.8.0
 	github.com/go-test/deep v1.0.5
-	github.com/golang/protobuf v1.3.2
+	github.com/golang/protobuf v1.4.2
 	github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26
-	github.com/google/go-cmp v0.3.1 // indirect
 	github.com/gorilla/websocket v1.4.2
 	github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277
 	github.com/hashicorp/golang-lru v0.5.4
@@ -46,8 +45,6 @@
 	github.com/naoina/go-stringutil v0.1.0 // indirect
 	github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
 	github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c
-	github.com/onsi/ginkgo v1.8.0 // indirect
-	github.com/onsi/gomega v1.5.0 // indirect
 	github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222
 	github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7
 	github.com/prometheus/tsdb v0.7.1
@@ -61,20 +58,19 @@
 	github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570
 	github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect
 	github.com/stretchr/testify v1.4.0
-	github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d
+	github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca
 	github.com/tidwall/gjson v1.3.5
 	github.com/tidwall/pretty v1.0.0
 	github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
 	github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208
 	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
-	golang.org/x/net v0.0.0-20200625001655-4c5254603344 // indirect
+	golang.org/x/net v0.0.0-20200822124328-c89045814202 // indirect
 	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
-	golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
-	golang.org/x/text v0.3.2
+	golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8
+	golang.org/x/text v0.3.3
 	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
 	gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6
 	gopkg.in/urfave/cli.v1 v1.20.0
 	gotest.tools v2.2.0+incompatible // indirect
-	storj.io/uplink v1.1.2
 )
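
The jsonEqual helper added in tracers_test.go above exists because reflect.DeepEqual distinguishes values that are identical as JSON: the decoded tracer result and the expectation can arrive as different concrete Go types, so both sides are bounced through the same shape before comparing. A self-contained sketch of the idea; the trace struct here is only a stand-in for the package's callTrace type, not part of this change:

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

// trace stands in for the callTrace type the real helper unmarshals into.
type trace struct {
	Type string `json:"type"`
}

// jsonEqual marshals both values and re-unmarshals them into the same
// concrete shape, so only the JSON content is compared.
func jsonEqual(x, y interface{}) bool {
	xt, yt := new(trace), new(trace)
	if xj, err := json.Marshal(x); err == nil {
		json.Unmarshal(xj, xt)
	} else {
		return false
	}
	if yj, err := json.Marshal(y); err == nil {
		json.Unmarshal(yj, yt)
	} else {
		return false
	}
	return reflect.DeepEqual(xt, yt)
}

func main() {
	got := map[string]interface{}{"type": "CALL"} // e.g. freshly unmarshaled tracer output
	want := &trace{Type: "CALL"}                  // expectation held as a struct
	fmt.Println(reflect.DeepEqual(got, want))     // false: different Go types
	fmt.Println(jsonEqual(got, want))             // true: same JSON content
}
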
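The call_tracer.js change above keeps result.output when a call fails with "execution reverted" and the output is non-empty, which is exactly what the new call_tracer_revert_reason.json case exercises: its output field is an ABI-encoded Error(string) payload, i.e. the 4-byte selector 0x08c379a0 followed by a 32-byte offset, a 32-byte length, and the padded string bytes. A minimal standalone sketch of decoding such a payload; unpackRevert and its bounds checks are assumptions of this sketch, not part of the diff:

package main

import (
	"bytes"
	"encoding/hex"
	"errors"
	"fmt"
	"math/big"
)

// revertSelector is the 4-byte selector of Error(string), keccak256("Error(string)")[:4].
var revertSelector = []byte{0x08, 0xc3, 0x79, 0xa0}

// unpackRevert decodes an ABI-encoded Error(string) revert payload.
// Layout after the selector: 32-byte offset, 32-byte length, string bytes.
func unpackRevert(output []byte) (string, error) {
	if len(output) < 4+32+32 || !bytes.Equal(output[:4], revertSelector) {
		return "", errors.New("not an Error(string) revert payload")
	}
	data := output[4:]
	offset := new(big.Int).SetBytes(data[:32]).Uint64()
	if offset+32 > uint64(len(data)) {
		return "", errors.New("malformed offset")
	}
	length := new(big.Int).SetBytes(data[offset : offset+32]).Uint64()
	if offset+32+length > uint64(len(data)) {
		return "", errors.New("malformed length")
	}
	return string(data[offset+32 : offset+32+length]), nil
}

func main() {
	// The "output" field from call_tracer_revert_reason.json, without the 0x prefix.
	raw, _ := hex.DecodeString("08c379a0" +
		"0000000000000000000000000000000000000000000000000000000000000020" +
		"000000000000000000000000000000000000000000000000000000000000001e" +
		"53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e0000")
	reason, err := unpackRevert(raw)
	fmt.Println(reason, err) // Self-delegation is disallowed. <nil>
}

Fed the output from the testdata above, this prints "Self-delegation is disallowed.", the require message embedded in the voting contract's bytecode.
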
diff --git a/go.sum b/go.sum
old mode 100644
new mode 100755
index 35b4258a62..4de62984bd
--- a/go.sum
+++ b/go.sum
@@ -57,20 +57,15 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 h1:Eey/GGQ/E5Xp1P2Lyx1qj007hLZfbi0+CoVeJruGCtI=
-github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
+github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
 github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
 github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
 github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
-github.com/btcsuite/btcutil v1.0.1 h1:GKOz8BnRjYrb/JTKgaOk+zh26NWNdSNvdvv0xoAZMSA=
-github.com/btcsuite/btcutil v1.0.1/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
 github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
 github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
 github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
 github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
 github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
-github.com/calebcase/tmpfile v1.0.1 h1:vD8FSrbsbexhep39/6mvtbIHS3GzIRqiprDNCF6QqSk=
-github.com/calebcase/tmpfile v1.0.1/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw=
 github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
 github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
@@ -112,6 +107,8 @@ github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepB
 github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -139,8 +136,14 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf
v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26 h1:lMm2hD9Fy0ynom5+85/pbdkiYcBqM1JWmhpAXLmy0fw= github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -148,8 +151,9 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -191,7 +195,6 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/holiman/uint256 v1.1.1 h1:4JywC80b+/hSfljFlEBLHrrh+CIONLDz9NuFl0af4Mw= github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= @@ -247,8 +250,6 @@ github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -266,17 +267,21 @@ github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hz github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0= github.com/naoina/toml 
v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c h1:1RHs3tNxjXGHeul8z2t6H2N2TlAqpKe5yryJztRx4Jk= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -322,11 +327,6 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= -github.com/spacemonkeygo/monkit/v3 v3.0.5/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= -github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752 h1:WcQDknqg0qajLNYKv3mXgbkWlYs5rPgZehGJFWePHVI= -github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4= -github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= @@ -355,8 +355,8 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/subosito/gotenv v1.2.0 
h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs= -github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= +github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk= +github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/tidwall/gjson v1.3.5 h1:2oW9FBNu8qt9jy5URgrzsVx/T/KSn3qn/smJQ0crlDQ= github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= @@ -368,26 +368,15 @@ github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZ github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k= -github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc= github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk= github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/zeebo/admission/v3 v3.0.1/go.mod h1:BP3isIv9qa2A7ugEratNq1dnl2oZRXaQUGdU7WXKtbw= -github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/errs v1.2.2 h1:5NFypMTuSdoySVTqlNs1dEoU21QVamMQJxW/Fii5O7g= -github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= -github.com/zeebo/float16 v0.1.0/go.mod h1:fssGvvXu+XS8MH57cKmyrLB/cqioYeYX/2mXCN3a5wo= -github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54/go.mod h1:EI8LcOBDlSL3POyqwC1eJhOYlMBMidES+613EtmmT5w= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -395,8 +384,6 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -434,9 +421,10 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -444,7 +432,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -462,14 +449,19 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107144601-ef85f5a75ddf/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8 h1:AvbQYmiaaaza3cW3QXRyPo5kYgpFIzOAfeAAN7m3qQ4= +golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -491,6 +483,9 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -513,12 +508,18 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -534,8 +535,9 @@ gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHO gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -543,9 +545,3 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -storj.io/common v0.0.0-20200611114417-9a3d012fdb62 h1:y8vGNQ0HjtD79G8MfCwbs6hct40tSBoDaOnsxWOZpU4= -storj.io/common v0.0.0-20200611114417-9a3d012fdb62/go.mod h1:6S6Ub92/BB+ofU7hbyPcm96b4Q1ayyN0HLog+3u+wGc= -storj.io/drpc v0.0.12 h1:4ei1M4cnWlYxcQheX0Dg4+c12zCD+oJqfweVQVWarsA= -storj.io/drpc v0.0.12/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA= -storj.io/uplink v1.1.2 h1:r0EyoEDlAvqWd6SZDG10w0k2+CjSMi4wAq5J1Sw8y9Y= -storj.io/uplink v1.1.2/go.mod h1:UkdYN/dfSgv+d8fBUoZTrX2oLdj9gzX6Q7tp3CojgKA= diff --git a/graphql/graphql.go b/graphql/graphql.go index 1479ae7fdb..ad0e4453d4 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -1044,6 +1044,10 @@ func (r *Resolver) ProtocolVersion(ctx context.Context) (int32, error) { return 
int32(r.backend.ProtocolVersion()), nil } +func (r *Resolver) ChainID(ctx context.Context) (hexutil.Big, error) { + return hexutil.Big(*r.backend.ChainConfig().GetChainID()), nil +} + // SyncState represents the synchronisation status returned from the `syncing` accessor. type SyncState struct { progress ethereum.SyncProgress diff --git a/graphql/schema.go b/graphql/schema.go index 5dec10db20..d7b253f227 100644 --- a/graphql/schema.go +++ b/graphql/schema.go @@ -314,6 +314,8 @@ const schema string = ` protocolVersion: Int! # Syncing returns information on the current synchronisation state. syncing: SyncState + # ChainID returns the current chain ID for transaction replay protection. + chainID: BigInt! } type Mutation { diff --git a/les/odr_requests.go b/les/odr_requests.go index 8c1e0102f5..3cc55c98d8 100644 --- a/les/odr_requests.go +++ b/les/odr_requests.go @@ -116,7 +116,7 @@ func (r *BlockRequest) Validate(db ethdb.Database, msg *Msg) error { if r.Header == nil { return errHeaderUnavailable } - if r.Header.TxHash != types.DeriveSha(types.Transactions(body.Transactions)) { + if r.Header.TxHash != types.DeriveSha(types.Transactions(body.Transactions), new(trie.Trie)) { return errTxHashMismatch } if r.Header.UncleHash != types.CalcUncleHash(body.Uncles) { @@ -174,7 +174,7 @@ func (r *ReceiptsRequest) Validate(db ethdb.Database, msg *Msg) error { if r.Header == nil { return errHeaderUnavailable } - if r.Header.ReceiptHash != types.DeriveSha(receipt) { + if r.Header.ReceiptHash != types.DeriveSha(receipt, new(trie.Trie)) { return errReceiptHashMismatch } // Validations passed, store and return diff --git a/les/server_handler.go b/les/server_handler.go index c474363232..463f51cb43 100644 --- a/les/server_handler.go +++ b/les/server_handler.go @@ -489,7 +489,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { p.bumpInvalid() continue } - code, err := triedb.Node(common.BytesToHash(account.CodeHash)) + code, err := h.blockchain.StateCache().ContractCode(common.BytesToHash(request.AccKey), common.BytesToHash(account.CodeHash)) if err != nil { p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "codehash", common.BytesToHash(account.CodeHash), "err", err) continue diff --git a/light/odr.go b/light/odr.go index 1ea98ca5aa..0b854b0b6c 100644 --- a/light/odr.go +++ b/light/odr.go @@ -101,7 +101,7 @@ type CodeRequest struct { // StoreResult stores the retrieved data in local database func (req *CodeRequest) StoreResult(db ethdb.Database) { - db.Put(req.Hash[:], req.Data) + rawdb.WriteCode(db, req.Hash, req.Data) } // BlockRequest is the ODR request type for retrieving block bodies diff --git a/light/odr_test.go b/light/odr_test.go index e75e00d8ef..14493107f3 100644 --- a/light/odr_test.go +++ b/light/odr_test.go @@ -89,7 +89,7 @@ func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error { t.Prove(req.Key, 0, nodes) req.Proof = nodes case *CodeRequest: - req.Data, _ = odr.sdb.Get(req.Hash[:]) + req.Data = rawdb.ReadCode(odr.sdb, req.Hash) } req.StoreResult(odr.ldb) return nil diff --git a/light/trie.go b/light/trie.go index 0d69e74e21..3eb05f4a3f 100644 --- a/light/trie.go +++ b/light/trie.go @@ -22,6 +22,7 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -70,7 +71,8 @@ func (db 
*odrDatabase) ContractCode(addrHash, codeHash common.Hash) ([]byte, err if codeHash == sha3Nil { return nil, nil } - if code, err := db.backend.Database().Get(codeHash[:]); err == nil { + code := rawdb.ReadCode(db.backend.Database(), codeHash) + if len(code) != 0 { return code, nil } id := *db.id diff --git a/metrics/ewma.go b/metrics/ewma.go index 57c949e7d4..039286493e 100644 --- a/metrics/ewma.go +++ b/metrics/ewma.go @@ -4,6 +4,7 @@ import ( "math" "sync" "sync/atomic" + "time" ) // EWMAs continuously calculate an exponentially-weighted moving average @@ -85,7 +86,7 @@ type StandardEWMA struct { func (a *StandardEWMA) Rate() float64 { a.mutex.Lock() defer a.mutex.Unlock() - return a.rate * float64(1e9) + return a.rate * float64(time.Second) } // Snapshot returns a read-only copy of the EWMA. @@ -98,7 +99,7 @@ func (a *StandardEWMA) Snapshot() EWMA { func (a *StandardEWMA) Tick() { count := atomic.LoadInt64(&a.uncounted) atomic.AddInt64(&a.uncounted, -count) - instantRate := float64(count) / float64(5e9) + instantRate := float64(count) / float64(5*time.Second) a.mutex.Lock() defer a.mutex.Unlock() if a.init { diff --git a/metrics/meter.go b/metrics/meter.go index 58d170fae0..60ae919d04 100644 --- a/metrics/meter.go +++ b/metrics/meter.go @@ -2,6 +2,7 @@ package metrics import ( "sync" + "sync/atomic" "time" ) @@ -100,6 +101,11 @@ func NewRegisteredMeterForced(name string, r Registry) Meter { // MeterSnapshot is a read-only copy of another Meter. type MeterSnapshot struct { + // WARNING: The `temp` field is accessed atomically. + // On 32 bit platforms, only 64-bit aligned fields can be atomic. The struct is + // guaranteed to be so aligned, so take advantage of that. For more information, + // see https://golang.org/pkg/sync/atomic/#pkg-note-BUG. + temp int64 count int64 rate1, rate5, rate15, rateMean float64 } @@ -149,7 +155,7 @@ func (NilMeter) Rate1() float64 { return 0.0 } // Rate5 is a no-op. func (NilMeter) Rate5() float64 { return 0.0 } -// Rate15is a no-op. +// Rate15 is a no-op. func (NilMeter) Rate15() float64 { return 0.0 } // RateMean is a no-op. @@ -167,7 +173,7 @@ type StandardMeter struct { snapshot *MeterSnapshot a1, a5, a15 EWMA startTime time.Time - stopped bool + stopped uint32 } func newStandardMeter() *StandardMeter { @@ -182,11 +188,8 @@ func newStandardMeter() *StandardMeter { // Stop stops the meter, Mark() will be a no-op if you use it after being stopped. func (m *StandardMeter) Stop() { - m.lock.Lock() - stopped := m.stopped - m.stopped = true - m.lock.Unlock() - if !stopped { + stopped := atomic.SwapUint32(&m.stopped, 1) + if stopped != 1 { arbiter.Lock() delete(arbiter.meters, m) arbiter.Unlock() @@ -194,57 +197,45 @@ func (m *StandardMeter) Stop() { } // Count returns the number of events recorded. +// It updates the meter to be as accurate as possible func (m *StandardMeter) Count() int64 { - m.lock.RLock() - count := m.snapshot.count - m.lock.RUnlock() - return count + m.lock.Lock() + defer m.lock.Unlock() + m.updateMeter() + return m.snapshot.count } // Mark records the occurrence of n events. func (m *StandardMeter) Mark(n int64) { - m.lock.Lock() - defer m.lock.Unlock() - if m.stopped { - return - } - m.snapshot.count += n - m.a1.Update(n) - m.a5.Update(n) - m.a15.Update(n) - m.updateSnapshot() + atomic.AddInt64(&m.snapshot.temp, n) } // Rate1 returns the one-minute moving average rate of events per second. 
func (m *StandardMeter) Rate1() float64 { m.lock.RLock() - rate1 := m.snapshot.rate1 - m.lock.RUnlock() - return rate1 + defer m.lock.RUnlock() + return m.snapshot.rate1 } // Rate5 returns the five-minute moving average rate of events per second. func (m *StandardMeter) Rate5() float64 { m.lock.RLock() - rate5 := m.snapshot.rate5 - m.lock.RUnlock() - return rate5 + defer m.lock.RUnlock() + return m.snapshot.rate5 } // Rate15 returns the fifteen-minute moving average rate of events per second. func (m *StandardMeter) Rate15() float64 { m.lock.RLock() - rate15 := m.snapshot.rate15 - m.lock.RUnlock() - return rate15 + defer m.lock.RUnlock() + return m.snapshot.rate15 } // RateMean returns the meter's mean rate of events per second. func (m *StandardMeter) RateMean() float64 { m.lock.RLock() - rateMean := m.snapshot.rateMean - m.lock.RUnlock() - return rateMean + defer m.lock.RUnlock() + return m.snapshot.rateMean } // Snapshot returns a read-only copy of the meter. @@ -264,9 +255,19 @@ func (m *StandardMeter) updateSnapshot() { snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds() } +func (m *StandardMeter) updateMeter() { + // should only run with write lock held on m.lock + n := atomic.SwapInt64(&m.snapshot.temp, 0) + m.snapshot.count += n + m.a1.Update(n) + m.a5.Update(n) + m.a15.Update(n) +} + func (m *StandardMeter) tick() { m.lock.Lock() defer m.lock.Unlock() + m.updateMeter() m.a1.Tick() m.a5.Tick() m.a15.Tick() @@ -282,7 +283,7 @@ type meterArbiter struct { ticker *time.Ticker } -var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})} +var arbiter = meterArbiter{ticker: time.NewTicker(5 * time.Second), meters: make(map[*StandardMeter]struct{})} // Ticks meters on the scheduled interval func (ma *meterArbiter) tick() { diff --git a/metrics/meter_test.go b/metrics/meter_test.go index 28472253e8..b3f6cb8c0c 100644 --- a/metrics/meter_test.go +++ b/metrics/meter_test.go @@ -17,7 +17,7 @@ func TestGetOrRegisterMeter(t *testing.T) { r := NewRegistry() NewRegisteredMeter("foo", r).Mark(47) if m := GetOrRegisterMeter("foo", r); m.Count() != 47 { - t.Fatal(m) + t.Fatal(m.Count()) } } @@ -29,10 +29,11 @@ func TestMeterDecay(t *testing.T) { defer ma.ticker.Stop() m := newStandardMeter() ma.meters[m] = struct{}{} - go ma.tick() m.Mark(1) + ma.tickMeters() rateMean := m.RateMean() time.Sleep(100 * time.Millisecond) + ma.tickMeters() if m.RateMean() >= rateMean { t.Error("m.RateMean() didn't decrease") } @@ -72,3 +73,19 @@ func TestMeterZero(t *testing.T) { t.Errorf("m.Count(): 0 != %v\n", count) } } + +func TestMeterRepeat(t *testing.T) { + m := NewMeter() + for i := 0; i < 101; i++ { + m.Mark(int64(i)) + } + if count := m.Count(); count != 5050 { + t.Errorf("m.Count(): 5050 != %v\n", count) + } + for i := 0; i < 101; i++ { + m.Mark(int64(i)) + } + if count := m.Count(); count != 10100 { + t.Errorf("m.Count(): 10100 != %v\n", count) + } +} diff --git a/miner/worker.go b/miner/worker.go index 11b25bb0c3..99403f531d 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params/types/ctypes" "github.com/ethereum/go-ethereum/params/vars" + "github.com/ethereum/go-ethereum/trie" ) const ( @@ -712,6 +713,7 @@ func (w *worker) updateSnapshot() { w.current.txs, uncles, w.current.receipts, + new(trie.Trie), ) w.snapshotState = w.current.state.Copy() diff --git a/p2p/discover/lookup.go b/p2p/discover/lookup.go index 40b271e6d9..9ab4a71ce7 100644 
--- a/p2p/discover/lookup.go +++ b/p2p/discover/lookup.go @@ -104,9 +104,7 @@ func (it *lookup) startQueries() bool { // The first query returns nodes from the local table. if it.queries == -1 { - it.tab.mutex.Lock() - closest := it.tab.closest(it.result.target, bucketSize, false) - it.tab.mutex.Unlock() + closest := it.tab.findnodeByID(it.result.target, bucketSize, false) // Avoid finishing the lookup too quickly if table is empty. It'd be better to wait // for the table to fill in this case, but there is no good mechanism for that // yet. @@ -150,11 +148,14 @@ func (it *lookup) query(n *node, reply chan<- []*node) { } else if len(r) == 0 { fails++ it.tab.db.UpdateFindFails(n.ID(), n.IP(), fails) - it.tab.log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "results", len(r), "err", err) - if fails >= maxFindnodeFailures { - it.tab.log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails) + // Remove the node from the local table if it fails to return anything useful too + // many times, but only if there are enough other nodes in the bucket. + dropped := false + if fails >= maxFindnodeFailures && it.tab.bucketLen(n.ID()) >= bucketSize/2 { + dropped = true it.tab.delete(n) } + it.tab.log.Trace("FINDNODE failed", "id", n.ID(), "failcount", fails, "dropped", dropped, "err", err) } else if fails > 0 { // Reset failure counter because it counts _consecutive_ failures. it.tab.db.UpdateFindFails(n.ID(), n.IP(), 0) diff --git a/p2p/discover/table.go b/p2p/discover/table.go index 010fa47f52..56ab9368a5 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -392,22 +392,35 @@ func (tab *Table) copyLiveNodes() { } } -// closest returns the n nodes in the table that are closest to the -// given id. The caller must hold tab.mutex. -func (tab *Table) closest(target enode.ID, nresults int, checklive bool) *nodesByDistance { - // This is a very wasteful way to find the closest nodes but - // obviously correct. I believe that tree-based buckets would make - // this easier to implement efficiently. - close := &nodesByDistance{target: target} +// findnodeByID returns the n nodes in the table that are closest to the given id. +// This is used by the FINDNODE/v4 handler. +// +// The preferLive parameter says whether the caller wants liveness-checked results. If +// preferLive is true and the table contains any verified nodes, the result will not +// contain unverified nodes. However, if there are no verified nodes at all, the result +// will contain unverified nodes. +func (tab *Table) findnodeByID(target enode.ID, nresults int, preferLive bool) *nodesByDistance { + tab.mutex.Lock() + defer tab.mutex.Unlock() + + // Scan all buckets. There might be a better way to do this, but there aren't that many + // buckets, so this solution should be fine. The worst-case complexity of this loop + // is O(tab.len() * nresults). + nodes := &nodesByDistance{target: target} + liveNodes := &nodesByDistance{target: target} for _, b := range &tab.buckets { for _, n := range b.entries { - if checklive && n.livenessChecks == 0 { - continue + nodes.push(n, nresults) + if preferLive && n.livenessChecks > 0 { + liveNodes.push(n, nresults) } - close.push(n, nresults) } } - return close + + if preferLive && len(liveNodes.entries) > 0 { + return liveNodes + } + return nodes } // len returns the number of nodes in the table. @@ -421,6 +434,14 @@ func (tab *Table) len() (n int) { return n } +// bucketLen returns the number of nodes in the bucket for the given ID. 
+func (tab *Table) bucketLen(id enode.ID) int { + tab.mutex.Lock() + defer tab.mutex.Unlock() + + return len(tab.bucket(id).entries) +} + // bucket returns the bucket for the given node ID hash. func (tab *Table) bucket(id enode.ID) *bucket { d := enode.LogDist(tab.self().ID(), id) diff --git a/p2p/discover/table_test.go b/p2p/discover/table_test.go index 562691e5b9..5f40c967fd 100644 --- a/p2p/discover/table_test.go +++ b/p2p/discover/table_test.go @@ -190,7 +190,7 @@ func checkIPLimitInvariant(t *testing.T, tab *Table) { } } -func TestTable_closest(t *testing.T) { +func TestTable_findnodeByID(t *testing.T) { t.Parallel() test := func(test *closeTest) bool { @@ -202,7 +202,7 @@ func TestTable_closest(t *testing.T) { fillTable(tab, test.All) // check that closest(Target, N) returns nodes - result := tab.closest(test.Target, test.N, false).entries + result := tab.findnodeByID(test.Target, test.N, false).entries if hasDuplicates(result) { t.Errorf("result contains duplicates") return false diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index e5b6939a48..ad23eee6b4 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -324,7 +324,16 @@ func (t *UDPv4) findnode(toid enode.ID, toaddr *net.UDPAddr, target v4wire.Pubke Target: target, Expiration: uint64(time.Now().Add(expiration).Unix()), }) - return nodes, <-rm.errc + // Ensure that callers don't see a timeout if the node actually responded. Since + // findnode can receive more than one neighbors response, the reply matcher will be + // active until the remote node sends enough nodes. If the remote end doesn't have + // enough nodes the reply matcher will time out waiting for the second reply, but + // there's no need for an error in that case. + err := <-rm.errc + if err == errTimeout && rm.reply != nil { + err = nil + } + return nodes, err } // RequestENR sends enrRequest to the given node and waits for a response. @@ -453,9 +462,9 @@ func (t *UDPv4) loop() { if p.from == r.from && p.ptype == r.data.Kind() && p.ip.Equal(r.ip) { ok, requestDone := p.callback(r.data) matched = matched || ok + p.reply = r.data // Remove the matcher if callback indicates that all replies have been received. if requestDone { - p.reply = r.data p.errc <- nil plist.Remove(el) } @@ -715,9 +724,7 @@ func (t *UDPv4) handleFindnode(h *packetHandlerV4, from *net.UDPAddr, fromID eno // Determine closest nodes. target := enode.ID(crypto.Keccak256Hash(req.Target[:])) - t.tab.mutex.Lock() - closest := t.tab.closest(target, bucketSize, true).entries - t.tab.mutex.Unlock() + closest := t.tab.findnodeByID(target, bucketSize, true).entries // Send neighbors in chunks with at most maxNeighbors per packet // to stay below the packet size limit. diff --git a/p2p/discover/v4_udp_test.go b/p2p/discover/v4_udp_test.go index 2b0a65736c..262e3f0ba3 100644 --- a/p2p/discover/v4_udp_test.go +++ b/p2p/discover/v4_udp_test.go @@ -22,6 +22,7 @@ import ( crand "crypto/rand" "encoding/binary" "errors" + "fmt" "io" "math/rand" "net" @@ -277,7 +278,7 @@ func TestUDPv4_findnode(t *testing.T) { test.table.db.UpdateLastPongReceived(remoteID, test.remoteaddr.IP, time.Now()) // check that closest neighbors are returned. 
- expected := test.table.closest(testTarget.ID(), bucketSize, true) + expected := test.table.findnodeByID(testTarget.ID(), bucketSize, true) test.packetIn(nil, &v4wire.Findnode{Target: testTarget, Expiration: futureExp}) waitNeighbors := func(want []*node) { test.waitPacketOut(func(p *v4wire.Neighbors, to *net.UDPAddr, hash []byte) { @@ -493,6 +494,91 @@ func TestUDPv4_EIP868(t *testing.T) { }) } +// This test verifies that a small network of nodes can boot up into a healthy state. +func TestUDPv4_smallNetConvergence(t *testing.T) { + t.Parallel() + + // Start the network. + nodes := make([]*UDPv4, 4) + for i := range nodes { + var cfg Config + if i > 0 { + bn := nodes[0].Self() + cfg.Bootnodes = []*enode.Node{bn} + } + nodes[i] = startLocalhostV4(t, cfg) + defer nodes[i].Close() + } + + // Run through the iterator on all nodes until + // they have all found each other. + status := make(chan error, len(nodes)) + for i := range nodes { + node := nodes[i] + go func() { + found := make(map[enode.ID]bool, len(nodes)) + it := node.RandomNodes() + for it.Next() { + found[it.Node().ID()] = true + if len(found) == len(nodes) { + status <- nil + return + } + } + status <- fmt.Errorf("node %s didn't find all nodes", node.Self().ID().TerminalString()) + }() + } + + // Wait for all status reports. + timeout := time.NewTimer(30 * time.Second) + defer timeout.Stop() + for received := 0; received < len(nodes); { + select { + case <-timeout.C: + for _, node := range nodes { + node.Close() + } + case err := <-status: + received++ + if err != nil { + t.Error("ERROR:", err) + return + } + } + } +} + +func startLocalhostV4(t *testing.T, cfg Config) *UDPv4 { + t.Helper() + + cfg.PrivateKey = newkey() + db, _ := enode.OpenDB("") + ln := enode.NewLocalNode(db, cfg.PrivateKey) + + // Prefix logs with node ID. + lprefix := fmt.Sprintf("(%s)", ln.ID().TerminalString()) + lfmt := log.TerminalFormat(false) + cfg.Log = testlog.Logger(t, log.LvlTrace) + cfg.Log.SetHandler(log.FuncHandler(func(r *log.Record) error { + t.Logf("%s %s", lprefix, lfmt.Format(r)) + return nil + })) + + // Listen. + socket, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IP{127, 0, 0, 1}}) + if err != nil { + t.Fatal(err) + } + realaddr := socket.LocalAddr().(*net.UDPAddr) + ln.SetStaticIP(realaddr.IP) + ln.SetFallbackUDP(realaddr.Port) + udp, err := ListenV4(socket, ln, cfg) + if err != nil { + t.Fatal(err) + } + return udp +} + // dgramPipe is a fake UDP socket. It queues all sent datagrams. type dgramPipe struct { mu *sync.Mutex diff --git a/params/config.go b/params/config.go index ce8856f0f2..653ef0527a 100644 --- a/params/config.go +++ b/params/config.go @@ -74,10 +74,10 @@ var ( // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. 
MainnetTrustedCheckpoint = &ctypes.TrustedCheckpoint{ - SectionIndex: 323, - SectionHead: common.HexToHash("0x2cab24d8502fb936799e4775c43b66eaec2981c9458f4676129b38bbd535a061"), - CHTRoot: common.HexToHash("0x30e9008a3e038a0e6b6d93cfc1726bdfdc40590a75e6dbe4feeafee2d7281ae6"), - BloomRoot: common.HexToHash("0x0fa8b4a19b77a454d1994864520bb8b427c829ac76967956c4ddddefe0407bf1"), + SectionIndex: 326, + SectionHead: common.HexToHash("0xbdec9f7056159360d64d6488ee11a0db574a67757cddd6fffd6719121d5733a5"), + CHTRoot: common.HexToHash("0xf9d2617f8e038b824a256025f01af3b3da681987df29dbfe718ad4c6c8a0875d"), + BloomRoot: common.HexToHash("0x712016984cfb66c165fdaf05c6a4aa89f08e4bb66fa77b199f2878fff4232d78"), } // MainnetCheckpointOracle contains a set of configs for the main network oracle. @@ -113,10 +113,10 @@ var ( // RopstenTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network. RopstenTrustedCheckpoint = &ctypes.TrustedCheckpoint{ - SectionIndex: 257, - SectionHead: common.HexToHash("0x2193034371f50352e412a763b14d53ffafbb5b9c12d1835516fb04f5a3498a9c"), - CHTRoot: common.HexToHash("0x9a4601d7893e469f4987a3ac9450b4953ca7302689770d1840542a3fe0a8c7c1"), - BloomRoot: common.HexToHash("0x198d72f8a47694682367981ae8d9988d6b30c4f433425399726dc53357a79e6f"), + SectionIndex: 260, + SectionHead: common.HexToHash("0xdcf714d033b8be3f0786515649d76e526157f811e5ae89c59dbfd53029d0d165"), + CHTRoot: common.HexToHash("0x987759454d404cd393a6a7743da64610076f167e989ec2cf9e0c0be6578d1304"), + BloomRoot: common.HexToHash("0xb8ee6d34cc30d61410717e2dc1af3294bc056f4b32a5eed5f6f386a8c1daa2b1"), } // RopstenCheckpointOracle contains a set of configs for the Ropsten test network oracle. @@ -157,10 +157,10 @@ var ( // RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network. RinkebyTrustedCheckpoint = &ctypes.TrustedCheckpoint{ - SectionIndex: 212, - SectionHead: common.HexToHash("0x47494484fe9696dfc7a351575b6b333566c79d2fad2a1f586165f58f3c2a553b"), - CHTRoot: common.HexToHash("0x6ec314ba06ce6e46c1be675dabbabc6fae464d394253e1647ba73480eb46d11d"), - BloomRoot: common.HexToHash("0x4ad93f0ddbe55baae629853971c6fd0de201ddef9f04892c05a1258fbacc88ca"), + SectionIndex: 214, + SectionHead: common.HexToHash("0x297b4daf21db636e76555c9d3e302d79a8efe3a3434143b9bcf61187ce8abcb1"), + CHTRoot: common.HexToHash("0x602044234a4ba8534286240200cde6e5797ae40151cbdd2dbf8eb8c0486a2c63"), + BloomRoot: common.HexToHash("0x9ccf6840ecc541b290c7b9f19edcba3e5f39206b05cd4ae5a7754040783d47d9"), } // RinkebyCheckpointOracle contains a set of configs for the Rinkeby test network oracle. @@ -199,10 +199,10 @@ var ( // GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network. GoerliTrustedCheckpoint = &ctypes.TrustedCheckpoint{ - SectionIndex: 96, - SectionHead: common.HexToHash("0xa038699854f0aa1624da55646e0219df4e319738319e65c38a270edcb7819a2a"), - CHTRoot: common.HexToHash("0x388fd4c8b58b76b76c2575211f5a6b83bce2be7ce628a750f67e4853680fa76a"), - BloomRoot: common.HexToHash("0xa8a61388f1240ea1d32413be1bdb056352c13e59278b6b31467bca77fb903fbc"), + SectionIndex: 99, + SectionHead: common.HexToHash("0xc9f09369acd657d5f77e6a389a68f673bf909ad98c269800c08229d75c1a90e3"), + CHTRoot: common.HexToHash("0x523218630348e98fa9f4e7fc3054aff717982d79c700cbecf5730c1479f21c6e"), + BloomRoot: common.HexToHash("0x75219ad4a3ec4682b89dd248ee56b52ef26fe577a426f4813297550deb5c4cb2"), } // GoerliCheckpointOracle contains a set of configs for the Goerli test network oracle. 
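The trie changes below replace the old integer depth with full Merkle paths for sync prioritization. As a rough, standalone sketch of the priority encoding that appears in the trie/sync.go hunk further down (the helper name encodePriority is ours, and a max-priority queue such as go-ethereum's prque is assumed):

package main

import "fmt"

// encodePriority packs a nibble path into an int64 priority, mirroring the
// scheme in the trie/sync.go hunk below: the top byte holds the path depth,
// so deeper nodes pop first (preserving DFS order), and up to 14 leading
// nibbles are stored inverted (15-nibble) so that, at equal depth,
// lexicographically smaller paths win in a max-priority queue.
func encodePriority(path []byte) int64 {
	prio := int64(len(path)) << 56
	for i := 0; i < 14 && i < len(path); i++ {
		prio |= int64(15-path[i]) << (52 - i*4)
	}
	return prio
}

func main() {
	// A deeper path outranks a shallower one...
	fmt.Println(encodePriority([]byte{0x1, 0x2}) > encodePriority([]byte{0x1})) // true
	// ...and at equal depth the lexicographically smaller path outranks the larger.
	fmt.Println(encodePriority([]byte{0x1, 0x2}) > encodePriority([]byte{0x1, 0x3})) // true
}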
diff --git a/tests/testdata b/tests/testdata index dcf1bba368..4553e8beae 160000 --- a/tests/testdata +++ b/tests/testdata @@ -1 +1 @@ -Subproject commit dcf1bba3687315057ff5777c8432f7a1cb478585 +Subproject commit 4553e8beaeab130a21e2a1d3ef063fe48606413a diff --git a/trie/committer.go b/trie/committer.go index 2f3d2a4633..fc8b7ceda5 100644 --- a/trie/committer.go +++ b/trie/committer.go @@ -226,12 +226,12 @@ func (c *committer) commitLoop(db *Database) { switch n := n.(type) { case *shortNode: if child, ok := n.Val.(valueNode); ok { - c.onleaf(child, hash) + c.onleaf(nil, child, hash) } case *fullNode: for i := 0; i < 16; i++ { if child, ok := n.Children[i].(valueNode); ok { - c.onleaf(child, hash) + c.onleaf(nil, child, hash) } } } diff --git a/trie/database.go b/trie/database.go index 0e9f306e63..fa8906b7a3 100644 --- a/trie/database.go +++ b/trie/database.go @@ -27,6 +27,7 @@ import ( "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -57,15 +58,6 @@ var ( memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil) ) -// secureKeyPrefix is the database key prefix used to store trie node preimages. -var secureKeyPrefix = []byte("secure-key-") - -// secureKeyPrefixLength is the length of the above prefix -const secureKeyPrefixLength = 11 - -// secureKeyLength is the length of the above prefix + 32byte hash. -const secureKeyLength = secureKeyPrefixLength + 32 - // Database is an intermediate write layer between the trie data structures and // the disk database. The aim is to accumulate trie writes in-memory and only // periodically flush a couple tries to disk, garbage collecting the remainder. @@ -78,7 +70,7 @@ type Database struct { diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs - dirties map[common.Hash]*cachedNode // Data and references relationships of dirty nodes + dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes oldest common.Hash // Oldest tracked node, flush-list head newest common.Hash // Newest tracked node, flush-list tail @@ -139,8 +131,8 @@ type rawShortNode struct { func (n rawShortNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") } -// cachedNode is all the information we know about a single cached node in the -// memory database write layer. +// cachedNode is all the information we know about a single cached trie node +// in the memory database write layer. type cachedNode struct { node node // Cached collapsed trie node, or raw rlp data size uint16 // Byte size of the useful cached data @@ -161,8 +153,8 @@ var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size()) // reference map. const cachedNodeChildrenSize = 48 -// rlp returns the raw rlp encoded blob of the cached node, either directly from -// the cache, or by regenerating it from the collapsed node. +// rlp returns the raw rlp encoded blob of the cached trie node, either directly +// from the cache, or by regenerating it from the collapsed node. 
func (n *cachedNode) rlp() []byte { if node, ok := n.node.(rawNode); ok { return node @@ -183,9 +175,9 @@ func (n *cachedNode) obj(hash common.Hash) node { return expandNode(hash[:], n.node) } -// forChilds invokes the callback for all the tracked children of this node, -// both the implicit ones from inside the node as well as the explicit ones -//from outside the node. +// forChilds invokes the callback for all the tracked children of this node, +// both the implicit ones from inside the node as well as the explicit ones +// from outside the node. func (n *cachedNode) forChilds(onChild func(hash common.Hash)) { for child := range n.children { onChild(child) } @@ -305,25 +297,14 @@ func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int, journal string) } // DiskDB retrieves the persistent storage backing the trie database. -func (db *Database) DiskDB() ethdb.KeyValueReader { +func (db *Database) DiskDB() ethdb.KeyValueStore { return db.diskdb } -// InsertBlob writes a new reference tracked blob to the memory database if it's -// yet unknown. This method should only be used for non-trie nodes that require -// reference counting, since trie nodes are garbage collected directly through -// their embedded children. -func (db *Database) InsertBlob(hash common.Hash, blob []byte) { - db.lock.Lock() - defer db.lock.Unlock() - - db.insert(hash, len(blob), rawNode(blob)) -} - -// insert inserts a collapsed trie node into the memory database. This method is -// a more generic version of InsertBlob, supporting both raw blob insertions as -// well ex trie node insertions. The blob size must be specified to allow proper -// size tracking. +// insert inserts a collapsed trie node into the memory database. +// The blob size must be specified to allow proper size tracking. +// All nodes inserted by this function will be reference tracked +// and in theory should only be used for **trie node** insertion. func (db *Database) insert(hash common.Hash, size int, node node) { // If the node's already cached, skip if _, ok := db.dirties[hash]; ok { @@ -430,39 +411,30 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) { memcacheDirtyMissMeter.Mark(1) // Content unavailable in memory, attempt to retrieve from disk - enc, err := db.diskdb.Get(hash[:]) - if err == nil && enc != nil { + enc := rawdb.ReadTrieNode(db.diskdb, hash) + if len(enc) != 0 { if db.cleans != nil { db.cleans.Set(hash[:], enc) memcacheCleanMissMeter.Mark(1) memcacheCleanWriteMeter.Mark(int64(len(enc))) } + return enc, nil } - return enc, err + return nil, errors.New("not found") } // preimage retrieves a cached trie node pre-image from memory. If it cannot be // found cached, the method queries the persistent database for the content. -func (db *Database) preimage(hash common.Hash) ([]byte, error) { +func (db *Database) preimage(hash common.Hash) []byte { // Retrieve the node from cache if available db.lock.RLock() preimage := db.preimages[hash] db.lock.RUnlock() if preimage != nil { - return preimage, nil + return preimage } - // Content unavailable in memory, attempt to retrieve from disk - return db.diskdb.Get(secureKey(hash)) -} - -// secureKey returns the database key for the preimage of key (as a newly -// allocated byte-slice) -func secureKey(hash common.Hash) []byte { - buf := make([]byte, secureKeyLength) - copy(buf, secureKeyPrefix) - copy(buf[secureKeyPrefixLength:], hash[:]) - return buf + return rawdb.ReadPreimage(db.diskdb, hash) } // Nodes retrieves the hashes of all the nodes cached within the memory database.
@@ -482,6 +454,9 @@ func (db *Database) Nodes() []common.Hash { } // Reference adds a new reference from a parent node to a child node. +// This function is used to add a reference between an internal trie node +// and an external node (e.g. a storage trie root); all internal trie nodes +// are referenced together by the database itself. func (db *Database) Reference(child common.Hash, parent common.Hash) { db.lock.Lock() defer db.lock.Unlock() @@ -604,27 +579,16 @@ func (db *Database) Cap(limit common.StorageSize) error { size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize) size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2)) - // We reuse an ephemeral buffer for the keys. The batch Put operation - // copies it internally, so we can reuse it. - var keyBuf [secureKeyLength]byte - copy(keyBuf[:], secureKeyPrefix) - // If the preimage cache got large enough, push to disk. If it's still small // leave for later to deduplicate writes. flushPreimages := db.preimagesSize > 4*1024*1024 if flushPreimages { - for hash, preimage := range db.preimages { - copy(keyBuf[secureKeyPrefixLength:], hash[:]) - if err := batch.Put(keyBuf[:], preimage); err != nil { - log.Error("Failed to commit preimage from trie database", "err", err) + rawdb.WritePreimages(batch, db.preimages) + if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { return err } - if batch.ValueSize() > ethdb.IdealBatchSize { - if err := batch.Write(); err != nil { - return err - } - batch.Reset() - } + batch.Reset() } } // Keep committing nodes from the flush-list until we're below allowance @@ -632,9 +596,8 @@ for size > limit && oldest != (common.Hash{}) { // Fetch the oldest referenced node and push into the batch node := db.dirties[oldest] - if err := batch.Put(oldest[:], node.rlp()); err != nil { - return err - } + rawdb.WriteTrieNode(batch, oldest, node.rlp()) + // If we exceeded the ideal batch size, commit and reset if batch.ValueSize() >= ethdb.IdealBatchSize { if err := batch.Write(); err != nil { @@ -662,8 +625,7 @@ defer db.lock.Unlock() if flushPreimages { - db.preimages = make(map[common.Hash][]byte) - db.preimagesSize = 0 + db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0 } for db.oldest != oldest { node := db.dirties[db.oldest] @@ -706,25 +668,13 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H start := time.Now() batch := db.diskdb.NewBatch() - // We reuse an ephemeral buffer for the keys. The batch Put operation - // copies it internally, so we can reuse it. - var keyBuf [secureKeyLength]byte - copy(keyBuf[:], secureKeyPrefix) - // Move all of the accumulated preimages into a write batch - for hash, preimage := range db.preimages { - copy(keyBuf[secureKeyPrefixLength:], hash[:]) - if err := batch.Put(keyBuf[:], preimage); err != nil { - log.Error("Failed to commit preimage from trie database", "err", err) + rawdb.WritePreimages(batch, db.preimages) + if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { return err } - // If the batch is too large, flush to disk - if batch.ValueSize() > ethdb.IdealBatchSize { - if err := batch.Write(); err != nil { - return err - } - batch.Reset() - } + batch.Reset() } // Since we're going to replay trie node writes into the clean cache, flush out // any batched pre-images before continuing.
@@ -754,8 +704,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H batch.Reset() // Reset the storage counters and bumped metrics - db.preimages = make(map[common.Hash][]byte) - db.preimagesSize = 0 + db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0 memcacheCommitTimeTimer.Update(time.Since(start)) memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize)) @@ -791,13 +740,11 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane if err != nil { return err } - if err := batch.Put(hash[:], node.rlp()); err != nil { - return err - } + // If we've reached an optimal batch size, commit and start over + rawdb.WriteTrieNode(batch, hash, node.rlp()) if callback != nil { callback(hash) } - // If we've reached an optimal batch size, commit and start over if batch.ValueSize() >= ethdb.IdealBatchSize { if err := batch.Write(); err != nil { return err diff --git a/trie/secure_trie.go b/trie/secure_trie.go index bd8e51d989..ae1bbc6aa9 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -130,8 +130,7 @@ func (t *SecureTrie) GetKey(shaKey []byte) []byte { if key, ok := t.getSecKeyCache()[string(shaKey)]; ok { return key } - key, _ := t.trie.db.preimage(common.BytesToHash(shaKey)) - return key + return t.trie.db.preimage(common.BytesToHash(shaKey)) } // Commit writes all nodes and the secure hash pre-images to the trie's database. diff --git a/trie/sync.go b/trie/sync.go index 978e76799a..147307fe71 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -22,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" ) @@ -33,21 +34,25 @@ var ErrNotRequested = errors.New("not requested") // node it already processed previously. var ErrAlreadyProcessed = errors.New("already processed") +// maxFetchesPerDepth is the maximum number of pending trie nodes per depth. The +// role of this value is to limit the number of trie nodes that get expanded in +// memory if the node was configured with a significant number of peers. +const maxFetchesPerDepth = 16384 + // request represents a scheduled or already in-flight state retrieval request. type request struct { + path []byte // Merkle path leading to this node for prioritization hash common.Hash // Hash of the node data content to retrieve data []byte // Data content of the node, cached until all subtrees complete - raw bool // Whether this is a raw entry (code) or a trie node + code bool // Whether this is a code entry parents []*request // Parent state nodes referencing this entry (notify all upon completion) - depth int // Depth level within the trie the node is located to prioritise DFS deps int // Number of dependencies before allowed to commit this node callback LeafCallback // Callback to invoke if a leaf node is reached on this branch } -// SyncResult is a simple list to return missing nodes along with their request -// hashes. +// SyncResult is a response with requested data along with its hash. type SyncResult struct { Hash common.Hash // Hash of the originally unknown trie node Data []byte // Data content of the retrieved node @@ -56,25 +61,41 @@ // syncMemBatch is an in-memory buffer of successfully downloaded but not yet // persisted data items.
type syncMemBatch struct { - batch map[common.Hash][]byte // In-memory membatch of recently completed items + nodes map[common.Hash][]byte // In-memory membatch of recently completed nodes + codes map[common.Hash][]byte // In-memory membatch of recently completed codes } // newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes. func newSyncMemBatch() *syncMemBatch { return &syncMemBatch{ - batch: make(map[common.Hash][]byte), + nodes: make(map[common.Hash][]byte), + codes: make(map[common.Hash][]byte), } } +// hasNode reports whether the trie node with the given hash is already cached. +func (batch *syncMemBatch) hasNode(hash common.Hash) bool { + _, ok := batch.nodes[hash] + return ok +} + +// hasCode reports whether the contract code with the given hash is already cached. +func (batch *syncMemBatch) hasCode(hash common.Hash) bool { + _, ok := batch.codes[hash] + return ok +} + // Sync is the main state trie synchronisation scheduler, which provides yet // unknown trie hashes to retrieve, accepts node data associated with said hashes // and reconstructs the trie step by step until all is done. type Sync struct { database ethdb.KeyValueReader // Persistent database to check for existing entries membatch *syncMemBatch // Memory buffer to avoid frequent database writes - requests map[common.Hash]*request // Pending requests pertaining to a key hash + nodeReqs map[common.Hash]*request // Pending requests pertaining to a trie node hash + codeReqs map[common.Hash]*request // Pending requests pertaining to a code hash queue *prque.Prque // Priority queue with the pending requests - bloom *SyncBloom // Bloom filter for fast node existence checks + fetches map[int]int // Number of active fetches per trie node depth + bloom *SyncBloom // Bloom filter for fast state existence checks } // NewSync creates a new trie data download scheduler. @@ -82,27 +103,31 @@ func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallb ts := &Sync{ database: database, membatch: newSyncMemBatch(), - requests: make(map[common.Hash]*request), + nodeReqs: make(map[common.Hash]*request), + codeReqs: make(map[common.Hash]*request), queue: prque.New(nil), + fetches: make(map[int]int), bloom: bloom, } - ts.AddSubTrie(root, 0, common.Hash{}, callback) + ts.AddSubTrie(root, nil, common.Hash{}, callback) return ts } // AddSubTrie registers a new trie to the sync code, rooted at the designated parent. -func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callback LeafCallback) { +func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, callback LeafCallback) { // Short circuit if the trie is empty or already known if root == emptyRoot { return } - if _, ok := s.membatch.batch[root]; ok { + if s.membatch.hasNode(root) { return } - if s.bloom.Contains(root[:]) { - // Bloom filter says this might be a duplicate, double check - blob, _ := s.database.Get(root[:]) - if local, err := decodeNode(root[:], blob); local != nil && err == nil { + if s.bloom == nil || s.bloom.Contains(root[:]) { + // Bloom filter says this might be a duplicate, double check. + // If database says yes, then at least the trie node is present + // and we hold the assumption that it's NOT legacy contract code.
+ blob := rawdb.ReadTrieNode(s.database, root) + if len(blob) > 0 { return } // False positive, bump fault meter @@ -110,13 +135,13 @@ } // Assemble the new sub-trie sync request req := &request{ + path: path, hash: root, - depth: depth, callback: callback, } // If this sub-trie has a designated parent, link them together if parent != (common.Hash{}) { - ancestor := s.requests[parent] + ancestor := s.nodeReqs[parent] if ancestor == nil { panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent)) } @@ -126,21 +151,25 @@ s.schedule(req) } -// AddRawEntry schedules the direct retrieval of a state entry that should not be -// interpreted as a trie node, but rather accepted and stored into the database -// as is. This method's goal is to support misc state metadata retrievals (e.g. -// contract code). -func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) { +// AddCodeEntry schedules the direct retrieval of contract code that should not +// be interpreted as a trie node, but rather accepted and stored into the database +// as is. +func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) { // Short circuit if the entry is empty or already known if hash == emptyState { return } - if _, ok := s.membatch.batch[hash]; ok { + if s.membatch.hasCode(hash) { return } - if s.bloom.Contains(hash[:]) { - // Bloom filter says this might be a duplicate, double check - if ok, _ := s.database.Has(hash[:]); ok { + if s.bloom == nil || s.bloom.Contains(hash[:]) { + // Bloom filter says this might be a duplicate, double check. + // If database says yes, the blob is present for sure. + // Note we only check existence under the new code scheme; fast + // sync is expected to run on a fresh node. Even if the code + // exists in the legacy format, fetch and store it under the + // new scheme anyway. + if blob := rawdb.ReadCodeWithPrefix(s.database, hash); len(blob) > 0 { return } // False positive, bump fault meter @@ -148,13 +177,13 @@ } // Assemble the new sub-trie sync request req := &request{ - hash: hash, - raw: true, - depth: depth, + path: path, + hash: hash, + code: true, } // If this sub-trie has a designated parent, link them together if parent != (common.Hash{}) { - ancestor := s.requests[parent] + ancestor := s.nodeReqs[parent] // the parent of codereq can ONLY be nodereq if ancestor == nil { panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent)) } @@ -168,66 +197,80 @@ func (s *Sync) Missing(max int) []common.Hash { var requests []common.Hash for !s.queue.Empty() && (max == 0 || len(requests) < max) { - requests = append(requests, s.queue.PopItem().(common.Hash)) + // Retrieve the next item in line + item, prio := s.queue.Peek() + + // If we have too many already-pending tasks for this depth, throttle + depth := int(prio >> 56) + if s.fetches[depth] > maxFetchesPerDepth { + break + } + // Item is allowed to be scheduled, add it to the task list + s.queue.Pop() + s.fetches[depth]++ + requests = append(requests, item.(common.Hash)) } return requests } -// Process injects a batch of retrieved trie nodes data, returning if something -// was committed to the database and also the index of an entry if its processing -// failed.
-func (s *Sync) Process(results []SyncResult) (bool, int, error) { - committed := false - - for i, item := range results { - // If the item was not requested, bail out - request := s.requests[item.Hash] - if request == nil { - return committed, i, ErrNotRequested - } - if request.data != nil { - return committed, i, ErrAlreadyProcessed - } - // If the item is a raw entry request, commit directly - if request.raw { - request.data = item.Data - s.commit(request) - committed = true - continue - } +// Process injects the received data for the requested item. Note it can +// happen that a single response commits two pending requests (e.g. +// there are two requests, one for code and one for a node, sharing the +// same hash). In this case the second response for the same hash will +// be treated as a "non-requested" or "already-processed" item, but +// there is no downside. +func (s *Sync) Process(result SyncResult) error { + // If the item was not requested either for code or node, bail out + if s.nodeReqs[result.Hash] == nil && s.codeReqs[result.Hash] == nil { + return ErrNotRequested + } + // There is a pending code request for this data, commit directly + var filled bool + if req := s.codeReqs[result.Hash]; req != nil && req.data == nil { + filled = true + req.data = result.Data + s.commit(req) + } + // There is a pending node request for this data, fill it. + if req := s.nodeReqs[result.Hash]; req != nil && req.data == nil { + filled = true // Decode the node data content and update the request - node, err := decodeNode(item.Hash[:], item.Data) + node, err := decodeNode(result.Hash[:], result.Data) if err != nil { - return committed, i, err + return err } - request.data = item.Data + req.data = result.Data // Create and schedule a request for all the children nodes - requests, err := s.children(request, node) + requests, err := s.children(req, node) if err != nil { - return committed, i, err - } - if len(requests) == 0 && request.deps == 0 { - s.commit(request) - committed = true - continue + return err } - request.deps += len(requests) - for _, child := range requests { - s.schedule(child) + if len(requests) == 0 && req.deps == 0 { + s.commit(req) + } else { + req.deps += len(requests) + for _, child := range requests { + s.schedule(child) + } } } - return committed, 0, nil + if !filled { + return ErrAlreadyProcessed + } + return nil } // Commit flushes the data stored in the internal membatch out to persistent // storage, returning any error that occurred. func (s *Sync) Commit(dbw ethdb.Batch) error { // Dump the membatch into a database dbw - for key, value := range s.membatch.batch { - if err := dbw.Put(key[:], value); err != nil { - return err - } + for key, value := range s.membatch.nodes { + rawdb.WriteTrieNode(dbw, key, value) + s.bloom.Add(key[:]) + } + for key, value := range s.membatch.codes { + rawdb.WriteCode(dbw, key, value) s.bloom.Add(key[:]) } // Drop the membatch data and return @@ -237,21 +280,34 @@ // Pending returns the number of state entries currently pending for download. func (s *Sync) Pending() int { - return len(s.requests) + return len(s.nodeReqs) + len(s.codeReqs) } // schedule inserts a new state retrieval request into the fetch queue. If there // is already a pending request for this node, the new request will be discarded // and only a parent reference added to the old one.
func (s *Sync) schedule(req *request) { + var reqset = s.nodeReqs + if req.code { + reqset = s.codeReqs + } // If we're already requesting this node, add a new reference and stop - if old, ok := s.requests[req.hash]; ok { + if old, ok := reqset[req.hash]; ok { old.parents = append(old.parents, req.parents...) return } - // Schedule the request for future retrieval - s.queue.Push(req.hash, int64(req.depth)) - s.requests[req.hash] = req + reqset[req.hash] = req + + // Schedule the request for future retrieval. This queue is shared + // by both node requests and code requests. It can happen that a trie + // node and contract code share the same hash; in that case two elements + // with the same hash and the same or a different depth will be pushed. + // That is fine: the worst case is that the second response is treated + // as a duplicate. + prio := int64(len(req.path)) << 56 // depth >= 128 will never happen, storage leaves will be included in their parents + for i := 0; i < 14 && i < len(req.path); i++ { + prio |= int64(15-req.path[i]) << (52 - i*4) // 15-nibble => lexicographic order + } + s.queue.Push(req.hash, prio) } // children retrieves all the missing children of a state trie entry for future @@ -259,23 +315,23 @@ func (s *Sync) children(req *request, object node) ([]*request, error) { // Gather all the children of the node, irrelevant whether known or not type child struct { - node node - depth int + path []byte + node node } var children []child switch node := (object).(type) { case *shortNode: children = []child{{ - node: node.Val, - depth: req.depth + len(node.Key), + node: node.Val, + path: append(append([]byte(nil), req.path...), node.Key...), }} case *fullNode: for i := 0; i < 17; i++ { if node.Children[i] != nil { children = append(children, child{ - node: node.Children[i], - depth: req.depth + 1, + node: node.Children[i], + path: append(append([]byte(nil), req.path...), byte(i)), }) } } @@ -288,7 +344,7 @@ func (s *Sync) children(req *request, object node) ([]*request, error) { // Notify any external watcher of a new key/value node if req.callback != nil { if node, ok := (child.node).(valueNode); ok { - if err := req.callback(node, req.hash); err != nil { + if err := req.callback(req.path, node, req.hash); err != nil { return nil, err } } @@ -297,12 +353,14 @@ if node, ok := (child.node).(hashNode); ok { // Try to resolve the node from the local database hash := common.BytesToHash(node) - if _, ok := s.membatch.batch[hash]; ok { + if s.membatch.hasNode(hash) { continue } - if s.bloom.Contains(node) { - // Bloom filter says this might be a duplicate, double check - if ok, _ := s.database.Has(node); ok { + if s.bloom == nil || s.bloom.Contains(node) { + // Bloom filter says this might be a duplicate, double check. + // If database says yes, then at least the trie node is present + // and we hold the assumption that it's NOT legacy contract code. + if blob := rawdb.ReadTrieNode(s.database, common.BytesToHash(node)); len(blob) > 0 { continue } // False positive, bump fault meter @@ -310,9 +368,9 @@ } // Locally unknown node, schedule for retrieval requests = append(requests, &request{ + path: child.path, hash: hash, parents: []*request{req}, - depth: child.depth, callback: req.callback, }) } @@ -325,10 +383,15 @@ // committed themselves.
 func (s *Sync) commit(req *request) (err error) {
 	// Write the node content to the membatch
-	s.membatch.batch[req.hash] = req.data
-
-	delete(s.requests, req.hash)
-
+	if req.code {
+		s.membatch.codes[req.hash] = req.data
+		delete(s.codeReqs, req.hash)
+		s.fetches[len(req.path)]--
+	} else {
+		s.membatch.nodes[req.hash] = req.data
+		delete(s.nodeReqs, req.hash)
+		s.fetches[len(req.path)]--
+	}
 	// Check all parents for completion
 	for _, parent := range req.parents {
 		parent.deps--
diff --git a/trie/sync_bloom.go b/trie/sync_bloom.go
index 3108b05935..89f61d66d9 100644
--- a/trie/sync_bloom.go
+++ b/trie/sync_bloom.go
@@ -25,6 +25,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
@@ -41,8 +42,8 @@ var (
 )
 
 // syncBloomHasher is a wrapper around a byte blob to satisfy the interface API
-// requirements of the bloom library used. It's used to convert a trie hash into
-// a 64 bit mini hash.
+// requirements of the bloom library used. It's used to convert a trie hash or
+// contract code hash into a 64-bit mini hash.
 type syncBloomHasher []byte
 
 func (f syncBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
@@ -53,9 +54,9 @@ func (f syncBloomHasher) Size() int { return 8 }
 func (f syncBloomHasher) Sum64() uint64 { return binary.BigEndian.Uint64(f) }
 
 // SyncBloom is a bloom filter used during fast sync to quickly decide if a trie
-// node already exists on disk or not. It self populates from the provided disk
-// database on creation in a background thread and will only start returning live
-// results once that's finished.
+// node or contract code already exists on disk or not. It self-populates from the
+// provided disk database on creation in a background thread and will only start
+// returning live results once that's finished.
 type SyncBloom struct {
 	bloom  *bloomfilter.Filter
 	inited uint32
@@ -107,10 +108,16 @@ func (b *SyncBloom) init(database ethdb.Iteratee) {
 	)
 	for it.Next() && atomic.LoadUint32(&b.closed) == 0 {
 		// If the database entry is a trie node, add it to the bloom
-		if key := it.Key(); len(key) == common.HashLength {
+		key := it.Key()
+		if len(key) == common.HashLength {
 			b.bloom.Add(syncBloomHasher(key))
 			bloomLoadMeter.Mark(1)
 		}
+		// If the database entry is contract code, add it to the bloom
+		if ok, hash := rawdb.IsCodeKey(key); ok {
+			b.bloom.Add(syncBloomHasher(hash))
+			bloomLoadMeter.Mark(1)
+		}
 		// If enough time elapsed since the last iterator swap, restart
 		if time.Since(swap) > 8*time.Second {
 			key := common.CopyBytes(it.Key())
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 6025b87fcc..34f3990576 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -124,8 +124,10 @@ func testIterativeSync(t *testing.T, count int) {
 		}
 		results[i] = SyncResult{hash, data}
 	}
-	if _, index, err := sched.Process(results); err != nil {
-		t.Fatalf("failed to process result #%d: %v", index, err)
+	for _, result := range results {
+		if err := sched.Process(result); err != nil {
+			t.Fatalf("failed to process result: %v", err)
+		}
 	}
 	batch := diskdb.NewBatch()
 	if err := sched.Commit(batch); err != nil {
@@ -160,8 +162,10 @@ func TestIterativeDelayedSync(t *testing.T) {
 		}
 		results[i] = SyncResult{hash, data}
 	}
-	if _, index, err := sched.Process(results); err != nil {
-		t.Fatalf("failed to process result #%d: %v", index, err)
+	for _, result := range results {
+		if err := sched.Process(result); err != nil {
+			t.Fatalf("failed to process result: %v", err)
+		}
 	}
 	batch := diskdb.NewBatch()
 	if err := sched.Commit(batch); err != nil {
@@ -204,8 +208,10 @@ func testIterativeRandomSync(t *testing.T, count int) {
 		results = append(results, SyncResult{hash, data})
 	}
 	// Feed the retrieved results back and queue new tasks
-	if _, index, err := sched.Process(results); err != nil {
-		t.Fatalf("failed to process result #%d: %v", index, err)
+	for _, result := range results {
+		if err := sched.Process(result); err != nil {
+			t.Fatalf("failed to process result: %v", err)
+		}
 	}
 	batch := diskdb.NewBatch()
 	if err := sched.Commit(batch); err != nil {
@@ -251,8 +257,10 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
 		}
 	}
 	// Feed the retrieved results back and queue new tasks
-	if _, index, err := sched.Process(results); err != nil {
-		t.Fatalf("failed to process result #%d: %v", index, err)
+	for _, result := range results {
+		if err := sched.Process(result); err != nil {
+			t.Fatalf("failed to process result: %v", err)
+		}
 	}
 	batch := diskdb.NewBatch()
 	if err := sched.Commit(batch); err != nil {
@@ -298,8 +306,10 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
 		results[i] = SyncResult{hash, data}
 	}
-	if _, index, err := sched.Process(results); err != nil {
-		t.Fatalf("failed to process result #%d: %v", index, err)
+	for _, result := range results {
+		if err := sched.Process(result); err != nil {
+			t.Fatalf("failed to process result: %v", err)
+		}
 	}
 	batch := diskdb.NewBatch()
 	if err := sched.Commit(batch); err != nil {
@@ -336,8 +346,10 @@ func TestIncompleteSync(t *testing.T) {
 			results[i] = SyncResult{hash, data}
 		}
 		// Process each of the trie nodes
-		if _, index, err := sched.Process(results); err != nil {
-			t.Fatalf("failed to process result #%d: %v", index, err)
+		for _, result := range results {
+			if err := sched.Process(result); err != nil {
+				t.Fatalf("failed to process result: %v", err)
+			}
 		}
 		batch := diskdb.NewBatch()
 		if err := sched.Commit(batch); err != nil {
diff --git a/trie/trie.go b/trie/trie.go
index 78e2eff534..7ccd37f872 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -38,7 +38,7 @@ var (
 // LeafCallback is a callback type invoked when a trie operation reaches a leaf
 // node. It's used by state sync and commit to allow handling external references
 // between account and storage tries.
-type LeafCallback func(leaf []byte, parent common.Hash) error
+type LeafCallback func(path []byte, leaf []byte, parent common.Hash) error
 
 // Trie is a Merkle Patricia Trie.
 // The zero value is an empty trie with no database.
@@ -473,3 +473,9 @@ func (t *Trie) hashRoot(db *Database) (node, node, error) {
 	t.unhashed = 0
 	return hashed, cached, nil
 }
+
+// Reset drops the referenced root node and cleans all internal state.
+func (t *Trie) Reset() {
+	t.root = nil
+	t.unhashed = 0
+}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 588562146a..2356b7a746 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -565,7 +565,7 @@ func BenchmarkCommitAfterHash(b *testing.B) {
 		benchmarkCommitAfterHash(b, nil)
 	})
 	var a account
-	onleaf := func(leaf []byte, parent common.Hash) error {
+	onleaf := func(path []byte, leaf []byte, parent common.Hash) error {
 		rlp.DecodeBytes(leaf, &a)
 		return nil
 	}
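
A note on the reworked single-result Process API above: because one response can satisfy both a code request and a node request for the same hash, a caller replaying overlapping batches may see ErrAlreadyProcessed for harmless duplicates. A minimal sketch of a driver loop under that assumption; sched, results, and diskdb are hypothetical stand-ins mirroring the test loops in the patch:

	// Feed fetched items to the scheduler one at a time, tolerating
	// duplicate responses, then flush the accumulated membatch to disk.
	for _, result := range results {
		if err := sched.Process(result); err != nil && err != ErrAlreadyProcessed {
			t.Fatalf("failed to process result: %v", err)
		}
	}
	batch := diskdb.NewBatch()
	if err := sched.Commit(batch); err != nil {
		t.Fatalf("failed to commit data: %v", err)
	}
	batch.Write()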
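The priority encoding introduced in Sync.schedule above packs the path depth into the top byte of the priority and the first 14 nibbles, inverted, into the bits below it, so the shared queue pops deeper paths first and breaks depth ties in lexicographic path order. A self-contained sketch of that encoding; prioFor is a hypothetical helper mirroring the in-function computation:

	package main

	import "fmt"

	// prioFor mirrors the encoding in Sync.schedule: bits 56-63 hold the
	// path depth, and each of up to 14 leading nibbles is stored as
	// (15 - nibble) in a 4-bit slot below, so that at equal depth a
	// lexicographically smaller path yields a larger priority.
	func prioFor(path []byte) int64 {
		prio := int64(len(path)) << 56
		for i := 0; i < 14 && i < len(path); i++ {
			prio |= int64(15-path[i]) << (52 - i*4)
		}
		return prio
	}

	func main() {
		fmt.Println(prioFor([]byte{1, 2, 3}) > prioFor([]byte{1, 2})) // true: deeper paths pop first
		fmt.Println(prioFor([]byte{1, 2}) > prioFor([]byte{1, 3}))    // true: smaller nibble wins at equal depth
	}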
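The sync bloom now indexes trie-node and contract-code hashes by the same 64-bit mini hash: the first eight bytes of the 32-byte hash read big-endian, which is what syncBloomHasher's Sum64 returns. A small sketch of that reduction; miniHash is a hypothetical helper, not part of the patch:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// miniHash reduces a 32-byte trie-node or code hash to the 64-bit
	// value the sync bloom indexes on (first 8 bytes, big-endian).
	func miniHash(hash [32]byte) uint64 {
		return binary.BigEndian.Uint64(hash[:8])
	}

	func main() {
		var h [32]byte
		h[0], h[7] = 0x01, 0xff
		fmt.Printf("%#x\n", miniHash(h)) // 0x01000000000000ff
	}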
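Finally, the widened LeafCallback signature threads the leaf's path through to external watchers. A sketch of adapting an existing callback, assuming the account type from the benchmark above; callbacks that have no use for the path can simply ignore the new first argument:

	var a account
	onleaf := func(path []byte, leaf []byte, parent common.Hash) error {
		// path is the hex-nibble path to the leaf within its trie; the
		// remaining arguments behave exactly as before.
		return rlp.DecodeBytes(leaf, &a)
	}
	_ = onleaf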