From 54add4255064f722b33fe1c13e46db0b875fe325 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Fri, 14 Aug 2020 14:18:12 +0200 Subject: [PATCH 001/105] cmd/geth/tests: try to fix spurious travis failure in les tests (#21410) * cmd/geth/tests: try to fix spurious travis failure in les tests * cmd/geth: les_test - remove extraneous option during boot --- cmd/geth/les_test.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/cmd/geth/les_test.go b/cmd/geth/les_test.go index 1bf52d591a..ae9ed5ecf5 100644 --- a/cmd/geth/les_test.go +++ b/cmd/geth/les_test.go @@ -95,9 +95,9 @@ func (g *gethrpc) waitSynced() { } } -func startGethWithRpc(t *testing.T, name string, args ...string) *gethrpc { +func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc { g := &gethrpc{name: name} - args = append([]string{"--networkid=42", "--port=0", "--nousb", "--http", "--http.port=0", "--http.api=admin,eth,les"}, args...) + args = append([]string{"--networkid=42", "--port=0", "--nousb"}, args...) t.Logf("Starting %v with rpc: %v", name, args) g.geth = runGeth(t, args...) // wait before we can attach to it. TODO: probe for it properly @@ -112,7 +112,7 @@ func startGethWithRpc(t *testing.T, name string, args ...string) *gethrpc { } func initGeth(t *testing.T) string { - g := runGeth(t, "--networkid=42", "init", "./testdata/clique.json") + g := runGeth(t, "--nousb", "--networkid=42", "init", "./testdata/clique.json") datadir := g.Datadir g.WaitExit() return datadir @@ -120,15 +120,15 @@ func initGeth(t *testing.T) string { func startLightServer(t *testing.T) *gethrpc { datadir := initGeth(t) - runGeth(t, "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv").WaitExit() + runGeth(t, "--nousb", "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv").WaitExit() account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105" - server := startGethWithRpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1") + server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1") return server } func startClient(t *testing.T, name string) *gethrpc { datadir := initGeth(t) - return startGethWithRpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1") + return startGethWithIpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1") } func TestPriorityClient(t *testing.T) { @@ -166,6 +166,7 @@ func TestPriorityClient(t *testing.T) { freeCli.getNodeInfo().ID: freeCli, prioCli.getNodeInfo().ID: prioCli, } + time.Sleep(1 * time.Second) lightServer.callRPC(&peers, "admin_peers") peersWithNames := make(map[string]string) for _, p := range peers { From f3bafecef7dfc573e68d9c04fbd1ad55fb3c0d2f Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Tue, 18 Aug 2020 11:27:04 +0200 Subject: [PATCH 002/105] metrics: make meter updates lock-free (#21446) --- metrics/ewma.go | 5 ++-- metrics/meter.go | 65 +++++++++++++++++++++---------------------- metrics/meter_test.go | 5 ++-- 3 files changed, 37 insertions(+), 38 deletions(-) diff --git a/metrics/ewma.go b/metrics/ewma.go 
index 57c949e7d4..039286493e 100644 --- a/metrics/ewma.go +++ b/metrics/ewma.go @@ -4,6 +4,7 @@ import ( "math" "sync" "sync/atomic" + "time" ) // EWMAs continuously calculate an exponentially-weighted moving average @@ -85,7 +86,7 @@ type StandardEWMA struct { func (a *StandardEWMA) Rate() float64 { a.mutex.Lock() defer a.mutex.Unlock() - return a.rate * float64(1e9) + return a.rate * float64(time.Second) } // Snapshot returns a read-only copy of the EWMA. @@ -98,7 +99,7 @@ func (a *StandardEWMA) Snapshot() EWMA { func (a *StandardEWMA) Tick() { count := atomic.LoadInt64(&a.uncounted) atomic.AddInt64(&a.uncounted, -count) - instantRate := float64(count) / float64(5e9) + instantRate := float64(count) / float64(5*time.Second) a.mutex.Lock() defer a.mutex.Unlock() if a.init { diff --git a/metrics/meter.go b/metrics/meter.go index 58d170fae0..7d2a2f5307 100644 --- a/metrics/meter.go +++ b/metrics/meter.go @@ -2,6 +2,7 @@ package metrics import ( "sync" + "sync/atomic" "time" ) @@ -101,6 +102,7 @@ func NewRegisteredMeterForced(name string, r Registry) Meter { // MeterSnapshot is a read-only copy of another Meter. type MeterSnapshot struct { count int64 + temp int64 rate1, rate5, rate15, rateMean float64 } @@ -149,7 +151,7 @@ func (NilMeter) Rate1() float64 { return 0.0 } // Rate5 is a no-op. func (NilMeter) Rate5() float64 { return 0.0 } -// Rate15is a no-op. +// Rate15 is a no-op. func (NilMeter) Rate15() float64 { return 0.0 } // RateMean is a no-op. @@ -167,7 +169,7 @@ type StandardMeter struct { snapshot *MeterSnapshot a1, a5, a15 EWMA startTime time.Time - stopped bool + stopped uint32 } func newStandardMeter() *StandardMeter { @@ -182,11 +184,8 @@ func newStandardMeter() *StandardMeter { // Stop stops the meter, Mark() will be a no-op if you use it after being stopped. func (m *StandardMeter) Stop() { - m.lock.Lock() - stopped := m.stopped - m.stopped = true - m.lock.Unlock() - if !stopped { + stopped := atomic.SwapUint32(&m.stopped, 1) + if stopped != 1 { arbiter.Lock() delete(arbiter.meters, m) arbiter.Unlock() @@ -194,57 +193,45 @@ func (m *StandardMeter) Stop() { } // Count returns the number of events recorded. +// It updates the meter to be as accurate as possible func (m *StandardMeter) Count() int64 { - m.lock.RLock() - count := m.snapshot.count - m.lock.RUnlock() - return count + m.lock.Lock() + defer m.lock.Unlock() + m.updateMeter() + return m.snapshot.count } // Mark records the occurrence of n events. func (m *StandardMeter) Mark(n int64) { - m.lock.Lock() - defer m.lock.Unlock() - if m.stopped { - return - } - m.snapshot.count += n - m.a1.Update(n) - m.a5.Update(n) - m.a15.Update(n) - m.updateSnapshot() + atomic.AddInt64(&m.snapshot.temp, n) } // Rate1 returns the one-minute moving average rate of events per second. func (m *StandardMeter) Rate1() float64 { m.lock.RLock() - rate1 := m.snapshot.rate1 - m.lock.RUnlock() - return rate1 + defer m.lock.RUnlock() + return m.snapshot.rate1 } // Rate5 returns the five-minute moving average rate of events per second. func (m *StandardMeter) Rate5() float64 { m.lock.RLock() - rate5 := m.snapshot.rate5 - m.lock.RUnlock() - return rate5 + defer m.lock.RUnlock() + return m.snapshot.rate5 } // Rate15 returns the fifteen-minute moving average rate of events per second. func (m *StandardMeter) Rate15() float64 { m.lock.RLock() - rate15 := m.snapshot.rate15 - m.lock.RUnlock() - return rate15 + defer m.lock.RUnlock() + return m.snapshot.rate15 } // RateMean returns the meter's mean rate of events per second. 
func (m *StandardMeter) RateMean() float64 { m.lock.RLock() - rateMean := m.snapshot.rateMean - m.lock.RUnlock() - return rateMean + defer m.lock.RUnlock() + return m.snapshot.rateMean } // Snapshot returns a read-only copy of the meter. @@ -264,9 +251,19 @@ func (m *StandardMeter) updateSnapshot() { snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds() } +func (m *StandardMeter) updateMeter() { + // should only run with write lock held on m.lock + n := atomic.LoadInt64(&m.snapshot.temp) + m.snapshot.count += n + m.a1.Update(n) + m.a5.Update(n) + m.a15.Update(n) +} + func (m *StandardMeter) tick() { m.lock.Lock() defer m.lock.Unlock() + m.updateMeter() m.a1.Tick() m.a5.Tick() m.a15.Tick() @@ -282,7 +279,7 @@ type meterArbiter struct { ticker *time.Ticker } -var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})} +var arbiter = meterArbiter{ticker: time.NewTicker(5 * time.Second), meters: make(map[*StandardMeter]struct{})} // Ticks meters on the scheduled interval func (ma *meterArbiter) tick() { diff --git a/metrics/meter_test.go b/metrics/meter_test.go index 28472253e8..9c43b61561 100644 --- a/metrics/meter_test.go +++ b/metrics/meter_test.go @@ -17,7 +17,7 @@ func TestGetOrRegisterMeter(t *testing.T) { r := NewRegistry() NewRegisteredMeter("foo", r).Mark(47) if m := GetOrRegisterMeter("foo", r); m.Count() != 47 { - t.Fatal(m) + t.Fatal(m.Count()) } } @@ -29,10 +29,11 @@ func TestMeterDecay(t *testing.T) { defer ma.ticker.Stop() m := newStandardMeter() ma.meters[m] = struct{}{} - go ma.tick() m.Mark(1) + ma.tickMeters() rateMean := m.RateMean() time.Sleep(100 * time.Millisecond) + ma.tickMeters() if m.RateMean() >= rateMean { t.Error("m.RateMean() didn't decrease") } From 2ff464b29da35f9590562ca4fba7283c96712f7e Mon Sep 17 00:00:00 2001 From: Giuseppe Bertone Date: Wed, 19 Aug 2020 08:54:21 +0200 Subject: [PATCH 003/105] core/state: fixed some comments (#21450) --- core/state/state_object.go | 9 ++++----- core/state/statedb.go | 10 +++++----- core/state/statedb_test.go | 10 +++++----- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/core/state/state_object.go b/core/state/state_object.go index 015a673781..26ab67e1ad 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -375,22 +375,21 @@ func (s *stateObject) CommitTrie(db Database) error { return err } -// AddBalance removes amount from c's balance. +// AddBalance adds amount to s's balance. // It is used to add funds to the destination account of a transfer. func (s *stateObject) AddBalance(amount *big.Int) { - // EIP158: We must check emptiness for the objects such that the account + // EIP161: We must check emptiness for the objects such that the account // clearing (0,0,0 objects) can take effect. if amount.Sign() == 0 { if s.empty() { s.touch() } - return } s.SetBalance(new(big.Int).Add(s.Balance(), amount)) } -// SubBalance removes amount from c's balance. +// SubBalance removes amount from s's balance. // It is used to remove funds from the origin account of a transfer. func (s *stateObject) SubBalance(amount *big.Int) { if amount.Sign() == 0 { @@ -455,7 +454,7 @@ func (s *stateObject) Code(db Database) []byte { } // CodeSize returns the size of the contract code associated with this object, -// or zero if none. This methos is an almost mirror of Code, but uses a cache +// or zero if none. This method is an almost mirror of Code, but uses a cache // inside the database to avoid loading codes seen recently. 
func (s *stateObject) CodeSize(db Database) int { if s.code != nil { diff --git a/core/state/statedb.go b/core/state/statedb.go index 17dd474314..0134a9d443 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -58,7 +58,7 @@ func (n *proofList) Delete(key []byte) error { panic("not supported") } -// StateDBs within the ethereum protocol are used to store anything +// StateDB structs within the ethereum protocol are used to store anything // within the merkle trie. StateDBs take care of caching and storing // nested states. It's the general query interface to retrieve: // * Contracts @@ -115,7 +115,7 @@ type StateDB struct { SnapshotCommits time.Duration } -// Create a new state from a given trie. +// New creates a new state from a given trie. func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { tr, err := db.OpenTrie(root) if err != nil { @@ -250,7 +250,7 @@ func (s *StateDB) Empty(addr common.Address) bool { return so == nil || so.empty() } -// Retrieve the balance from the given address or 0 if object not found +// GetBalance retrieves the balance from the given address or 0 if object not found func (s *StateDB) GetBalance(addr common.Address) *big.Int { stateObject := s.getStateObject(addr) if stateObject != nil { @@ -318,7 +318,7 @@ func (s *StateDB) GetProof(a common.Address) ([][]byte, error) { return [][]byte(proof), err } -// GetProof returns the StorageProof for given key +// GetStorageProof returns the StorageProof for given key func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) { var proof proofList trie := s.StorageTrie(a) @@ -560,7 +560,7 @@ func (s *StateDB) setStateObject(object *stateObject) { s.stateObjects[object.Address()] = object } -// Retrieve a state object or create a new state object if nil. +// GetOrNewStateObject retrieves a state object or create a new state object if nil. func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject { stateObject := s.getStateObject(addr) if stateObject == nil { diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 824a597498..36ff271331 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -144,7 +144,7 @@ func TestIntermediateLeaks(t *testing.T) { } } -// TestCopy tests that copying a statedb object indeed makes the original and +// TestCopy tests that copying a StateDB object indeed makes the original and // the copy independent of each other. This test is a regression test against // https://github.com/ethereum/go-ethereum/pull/15549. func TestCopy(t *testing.T) { @@ -647,11 +647,11 @@ func TestCopyCopyCommitCopy(t *testing.T) { } // TestDeleteCreateRevert tests a weird state transition corner case that we hit -// while changing the internals of statedb. The workflow is that a contract is -// self destructed, then in a followup transaction (but same block) it's created +// while changing the internals of StateDB. The workflow is that a contract is +// self-destructed, then in a follow-up transaction (but same block) it's created // again and the transaction reverted. // -// The original statedb implementation flushed dirty objects to the tries after +// The original StateDB implementation flushed dirty objects to the tries after // each transaction, so this works ok. The rework accumulated writes in memory // first, but the journal wiped the entire state object on create-revert. 
func TestDeleteCreateRevert(t *testing.T) { @@ -681,7 +681,7 @@ func TestDeleteCreateRevert(t *testing.T) { } } -// TestMissingTrieNodes tests that if the statedb fails to load parts of the trie, +// TestMissingTrieNodes tests that if the StateDB fails to load parts of the trie, // the Commit operation fails with an error // If we are missing trie nodes, we should not continue writing to the trie func TestMissingTrieNodes(t *testing.T) { From 32b078d418117a9cd52dcd655a065710c2e5ebff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 19 Aug 2020 10:28:08 +0300 Subject: [PATCH 004/105] build: drop disco, enable groovy on Ubuntu PPAs --- build/ci.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/build/ci.go b/build/ci.go index 35c114dd95..ae5aaf5ac8 100644 --- a/build/ci.go +++ b/build/ci.go @@ -139,13 +139,14 @@ var ( // Note: zesty is unsupported because it was officially deprecated on Launchpad. // Note: artful is unsupported because it was officially deprecated on Launchpad. // Note: cosmic is unsupported because it was officially deprecated on Launchpad. + // Note: disco is unsupported because it was officially deprecated on Launchpad. debDistroGoBoots = map[string]string{ "trusty": "golang-1.11", "xenial": "golang-go", "bionic": "golang-go", - "disco": "golang-go", "eoan": "golang-go", "focal": "golang-go", + "groovy": "golang-go", } debGoBootPaths = map[string]string{ From 7ebc6c43ff5907117a4c5a097ccaca9df9a10b98 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Wed, 19 Aug 2020 11:31:13 +0200 Subject: [PATCH 005/105] cmd/evm: statet8n output folder + tx hashes on trace filenames (#21406) * t8ntool: add output basedir * t8ntool: add txhash to trace filename * t8ntool: don't default to '.' basedir, allow absolute paths --- cmd/evm/README.md | 18 ++++++------ cmd/evm/internal/t8ntool/execution.go | 4 +-- cmd/evm/internal/t8ntool/flags.go | 5 ++++ cmd/evm/internal/t8ntool/transition.go | 40 +++++++++++++++++--------- cmd/evm/main.go | 1 + cmd/evm/transition-test.sh | 4 +-- 6 files changed, 46 insertions(+), 26 deletions(-) diff --git a/cmd/evm/README.md b/cmd/evm/README.md index 418417475d..8f0848bde8 100644 --- a/cmd/evm/README.md +++ b/cmd/evm/README.md @@ -29,6 +29,8 @@ Command line params that has to be supported are --trace Output full trace logs to files .jsonl --trace.nomemory Disable full memory dump in traces --trace.nostack Disable stack output in traces + --trace.noreturndata Disable return data output in traces + --output.basedir value Specifies where output files are placed. Will be created if it does not exist. (default: ".") --output.alloc alloc Determines where to put the alloc of the post-state. 
`stdout` - into the stdout output `stderr` - into the stderr output @@ -232,13 +234,13 @@ Example where blockhashes are provided: ./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace ``` ``` -cat trace-0.jsonl | grep BLOCKHASH -C2 +cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2 ``` ``` -{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"depth":1,"refund":0,"opName":"PUSH1","error":""} -{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"depth":1,"refund":0,"opName":"BLOCKHASH","error":""} -{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"depth":1,"refund":0,"opName":"STOP","error":""} -{"output":"","gasUsed":"0x17","time":155861} +{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"PUSH1","error":""} +{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"BLOCKHASH","error":""} +{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"STOP","error":""} +{"output":"","gasUsed":"0x17","time":112885} ``` In this example, the caller has not provided the required blockhash: @@ -254,9 +256,9 @@ Error code: 4 Another thing that can be done, is to chain invocations: ``` ./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout | ./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json -INFO [06-29|11:52:04.934] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" -INFO [06-29|11:52:04.936] rejected tx index=0 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" -INFO [06-29|11:52:04.936] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" +INFO [08-03|15:25:15.168] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" +INFO [08-03|15:25:15.169] rejected tx index=0 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" +INFO [08-03|15:25:15.169] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" ``` What happened here, is that we first applied two identical transactions, so the second one was rejected. 
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index a4fa971ebb..0fd6b869fc 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -81,7 +81,7 @@ type stEnvMarshaling struct { // Apply applies a set of transactions to a pre-state func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, txs types.Transactions, miningReward int64, - getTracerFn func(txIndex int) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) { + getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) { // Capture errors for BLOCKHASH operation, if we haven't been supplied the // required blockhashes @@ -135,7 +135,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, rejectedTxs = append(rejectedTxs, i) continue } - tracer, err := getTracerFn(txIndex) + tracer, err := getTracerFn(txIndex, tx.Hash()) if err != nil { return nil, nil, err } diff --git a/cmd/evm/internal/t8ntool/flags.go b/cmd/evm/internal/t8ntool/flags.go index d110af2c30..424156ba82 100644 --- a/cmd/evm/internal/t8ntool/flags.go +++ b/cmd/evm/internal/t8ntool/flags.go @@ -42,6 +42,11 @@ var ( Name: "trace.noreturndata", Usage: "Disable return data output in traces", } + OutputBasedir = cli.StringFlag{ + Name: "output.basedir", + Usage: "Specifies where output files are placed. Will be created if it does not exist.", + Value: "", + } OutputAllocFlag = cli.StringFlag{ Name: "output.alloc", Usage: "Determines where to put the `alloc` of the post-state.\n" + diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 079307b975..5119ed5fb7 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -22,6 +22,7 @@ import ( "io/ioutil" "math/big" "os" + "path" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" @@ -75,11 +76,22 @@ func Main(ctx *cli.Context) error { log.Root().SetHandler(glogger) var ( - err error - tracer vm.Tracer + err error + tracer vm.Tracer + baseDir = "" ) - var getTracer func(txIndex int) (vm.Tracer, error) + var getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error) + // If user specified a basedir, make sure it exists + if ctx.IsSet(OutputBasedir.Name) { + if base := ctx.String(OutputBasedir.Name); len(base) > 0 { + err := os.MkdirAll(base, 0755) // //rw-r--r-- + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err)) + } + baseDir = base + } + } if ctx.Bool(TraceFlag.Name) { // Configure the EVM logger logConfig := &vm.LogConfig{ @@ -95,11 +107,11 @@ func Main(ctx *cli.Context) error { prevFile.Close() } }() - getTracer = func(txIndex int) (vm.Tracer, error) { + getTracer = func(txIndex int, txHash common.Hash) (vm.Tracer, error) { if prevFile != nil { prevFile.Close() } - traceFile, err := os.Create(fmt.Sprintf("trace-%d.jsonl", txIndex)) + traceFile, err := os.Create(path.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String()))) if err != nil { return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err)) } @@ -107,7 +119,7 @@ func Main(ctx *cli.Context) error { return vm.NewJSONLogger(logConfig, traceFile), nil } } else { - getTracer = func(txIndex int) (tracer vm.Tracer, err error) { + getTracer = func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error) { return nil, nil } } @@ -197,7 +209,7 @@ func 
Main(ctx *cli.Context) error { //postAlloc := state.DumpGenesisFormat(false, false, false) collector := make(Alloc) state.DumpToCollector(collector, false, false, false, nil, -1) - return dispatchOutput(ctx, result, collector) + return dispatchOutput(ctx, baseDir, result, collector) } @@ -224,12 +236,12 @@ func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) { } // saveFile marshalls the object to the given file -func saveFile(filename string, data interface{}) error { +func saveFile(baseDir, filename string, data interface{}) error { b, err := json.MarshalIndent(data, "", " ") if err != nil { return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) } - if err = ioutil.WriteFile(filename, b, 0644); err != nil { + if err = ioutil.WriteFile(path.Join(baseDir, filename), b, 0644); err != nil { return NewError(ErrorIO, fmt.Errorf("failed writing output: %v", err)) } return nil @@ -237,26 +249,26 @@ func saveFile(filename string, data interface{}) error { // dispatchOutput writes the output data to either stderr or stdout, or to the specified // files -func dispatchOutput(ctx *cli.Context, result *ExecutionResult, alloc Alloc) error { +func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc) error { stdOutObject := make(map[string]interface{}) stdErrObject := make(map[string]interface{}) - dispatch := func(fName, name string, obj interface{}) error { + dispatch := func(baseDir, fName, name string, obj interface{}) error { switch fName { case "stdout": stdOutObject[name] = obj case "stderr": stdErrObject[name] = obj default: // save to file - if err := saveFile(fName, obj); err != nil { + if err := saveFile(baseDir, fName, obj); err != nil { return err } } return nil } - if err := dispatch(ctx.String(OutputAllocFlag.Name), "alloc", alloc); err != nil { + if err := dispatch(baseDir, ctx.String(OutputAllocFlag.Name), "alloc", alloc); err != nil { return err } - if err := dispatch(ctx.String(OutputResultFlag.Name), "result", result); err != nil { + if err := dispatch(baseDir, ctx.String(OutputResultFlag.Name), "result", result); err != nil { return err } if len(stdOutObject) > 0 { diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 7b472350d9..35c672142d 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -146,6 +146,7 @@ var stateTransitionCommand = cli.Command{ t8ntool.TraceDisableMemoryFlag, t8ntool.TraceDisableStackFlag, t8ntool.TraceDisableReturnDataFlag, + t8ntool.OutputBasedir, t8ntool.OutputAllocFlag, t8ntool.OutputResultFlag, t8ntool.InputAllocFlag, diff --git a/cmd/evm/transition-test.sh b/cmd/evm/transition-test.sh index d1400ca577..34c9249855 100644 --- a/cmd/evm/transition-test.sh +++ b/cmd/evm/transition-test.sh @@ -155,10 +155,10 @@ echo "Example where blockhashes are provided: " cmd="./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace" tick && echo $cmd && tick $cmd 2>&1 >/dev/null -cmd="cat trace-0.jsonl | grep BLOCKHASH -C2" +cmd="cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2" tick && echo $cmd && tick echo "$ticks" -cat trace-0.jsonl | grep BLOCKHASH -C2 +cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2 echo "$ticks" echo "" From 0bdd295cc0065e0f61e595ada4d3abe58f35c448 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Thu, 20 Aug 2020 09:49:35 +0200 Subject: [PATCH 006/105] core: more detailed metering 
for reorgs (#21420) --- core/blockchain.go | 8 ++++++-- core/tx_pool.go | 2 ++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 499c8eee67..6acef13c41 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -70,8 +70,11 @@ var ( blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil) blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil) blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil) - blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil) - blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil) + + blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil) + blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil) + blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil) + blockReorgInvalidatedTx = metrics.NewRegisteredMeter("chain/reorg/invalidTx", nil) blockPrefetchExecuteTimer = metrics.NewRegisteredTimer("chain/prefetch/executes", nil) blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil) @@ -2152,6 +2155,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) blockReorgAddMeter.Mark(int64(len(newChain))) blockReorgDropMeter.Mark(int64(len(oldChain))) + blockReorgMeter.Mark(1) } else { log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) } diff --git a/core/tx_pool.go b/core/tx_pool.go index 3100acf4d2..0fe1d3db5b 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -1422,6 +1422,8 @@ func (pool *TxPool) demoteUnexecutables() { pool.enqueueTx(hash, tx) } pendingGauge.Dec(int64(len(gapped))) + // This might happen in a reorg, so log it to the metering + blockReorgInvalidatedTx.Mark(int64(len(gapped))) } // Delete the entire pending entry if it became empty. if list.Empty() { From 8cbdc8638fd28693f84d7bdbbdd587e8c57f6383 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 20 Aug 2020 13:01:24 +0300 Subject: [PATCH 007/105] core: define and test chain rewind corner cases (#21409) * core: define and test chain reparation cornercases * core: write up a variety of set-head tests * core, eth: unify chain rollbacks, handle all the cases * core: make linter smile * core: remove commented out legacy code * core, eth/downloader: fix review comments * core: revert a removed recovery mechanism --- core/blockchain.go | 203 ++- core/blockchain_repair_test.go | 1653 ++++++++++++++++++++++++ core/blockchain_sethead_test.go | 1949 +++++++++++++++++++++++++++++ core/blockchain_test.go | 46 +- core/headerchain.go | 52 +- core/rawdb/accessors_chain.go | 26 + core/rawdb/database.go | 17 + core/rawdb/freezer.go | 79 +- core/rawdb/schema.go | 3 + eth/downloader/downloader.go | 61 +- eth/downloader/downloader_test.go | 53 +- eth/sync.go | 18 +- trie/sync.go | 6 +- 13 files changed, 3952 insertions(+), 214 deletions(-) create mode 100644 core/blockchain_repair_test.go create mode 100644 core/blockchain_sethead_test.go diff --git a/core/blockchain.go b/core/blockchain.go index 6acef13c41..9dc1fa9c65 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -130,6 +130,16 @@ type CacheConfig struct { SnapshotWait bool // Wait for snapshot construction on startup. 
TODO(karalabe): This is a dirty hack for testing, nuke it } +// defaultCacheConfig are the default caching values if none are specified by the +// user (also used during testing). +var defaultCacheConfig = &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieTimeLimit: 5 * time.Minute, + SnapshotLimit: 256, + SnapshotWait: true, +} + // BlockChain represents the canonical chain given a database with a genesis // block. The Blockchain manages chain imports, reverts, chain reorganisations. // @@ -204,13 +214,7 @@ type BlockChain struct { // Processor. func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) { if cacheConfig == nil { - cacheConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 256, - SnapshotWait: true, - } + cacheConfig = defaultCacheConfig } bodyCache, _ := lru.New(bodyCacheLimit) bodyRLPCache, _ := lru.New(bodyCacheLimit) @@ -268,15 +272,18 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par txIndexBlock = frozen } } - if err := bc.loadLastState(); err != nil { return nil, err } - // The first thing the node will do is reconstruct the verification data for - // the head block (ethash cache or clique voting snapshot). Might as well do - // it in advance. - bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true) - + // Make sure the state associated with the block is available + head := bc.CurrentBlock() + if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil { + log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash()) + if err := bc.SetHead(head.NumberU64()); err != nil { + return nil, err + } + } + // Ensure that a previous crash in SetHead doesn't leave extra ancients if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 { var ( needRewind bool @@ -286,7 +293,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par // blockchain repair. If the head full block is even lower than the ancient // chain, truncate the ancient store. fullBlock := bc.CurrentBlock() - if fullBlock != nil && fullBlock != bc.genesisBlock && fullBlock.NumberU64() < frozen-1 { + if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 { needRewind = true low = fullBlock.NumberU64() } @@ -301,15 +308,17 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par } } if needRewind { - var hashes []common.Hash - previous := bc.CurrentHeader().Number.Uint64() - for i := low + 1; i <= bc.CurrentHeader().Number.Uint64(); i++ { - hashes = append(hashes, rawdb.ReadCanonicalHash(bc.db, i)) + log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low) + if err := bc.SetHead(low); err != nil { + return nil, err } - bc.Rollback(hashes) - log.Warn("Truncate ancient chain", "from", previous, "to", low) } } + // The first thing the node will do is reconstruct the verification data for + // the head block (ethash cache or clique voting snapshot). Might as well do + // it in advance. 
+ bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true) + // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain for hash := range BadHashes { if header := bc.GetHeaderByHash(hash); header != nil { @@ -318,7 +327,9 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par // make sure the headerByNumber (if present) is in our current canonical chain if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) - bc.SetHead(header.Number.Uint64() - 1) + if err := bc.SetHead(header.Number.Uint64() - 1); err != nil { + return nil, err + } log.Error("Chain rewind was successful, resuming normal operation") } } @@ -385,15 +396,6 @@ func (bc *BlockChain) loadLastState() error { log.Warn("Head block missing, resetting chain", "hash", head) return bc.Reset() } - // Make sure the state associated with the block is available - if _, err := state.New(currentBlock.Root(), bc.stateCache, bc.snaps); err != nil { - // Dangling block without a state associated, init from scratch - log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash()) - if err := bc.repair(¤tBlock); err != nil { - return err - } - rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash()) - } // Everything seems to be fine, set as the head block bc.currentBlock.Store(currentBlock) headBlockGauge.Update(int64(currentBlock.NumberU64())) @@ -427,30 +429,48 @@ func (bc *BlockChain) loadLastState() error { log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0))) log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0))) log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0))) - + if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil { + log.Info("Loaded last fast-sync pivot marker", "number", *pivot) + } return nil } -// SetHead rewinds the local chain to a new head. In the case of headers, everything -// above the new head will be deleted and the new one set. In the case of blocks -// though, the head may be further rewound if block bodies are missing (non-archive -// nodes after a fast sync). +// SetHead rewinds the local chain to a new head. Depending on whether the node +// was fast synced or full synced and in which state, the method will try to +// delete minimal data from disk whilst retaining chain consistency. 
func (bc *BlockChain) SetHead(head uint64) error { - log.Warn("Rewinding blockchain", "target", head) - bc.chainmu.Lock() defer bc.chainmu.Unlock() - updateFn := func(db ethdb.KeyValueWriter, header *types.Header) { - // Rewind the block chain, ensuring we don't end up with a stateless head block - if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() < currentBlock.NumberU64() { + // Retrieve the last pivot block to short circuit rollbacks beyond it and the + // current freezer limit to start nuking id underflown + pivot := rawdb.ReadLastPivotNumber(bc.db) + frozen, _ := bc.db.Ancients() + + updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) { + // Rewind the block chain, ensuring we don't end up with a stateless head + // block. Note, depth equality is permitted to allow using SetHead as a + // chain reparation mechanism without deleting any data! + if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() { newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64()) if newHeadBlock == nil { + log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash()) newHeadBlock = bc.genesisBlock } else { - if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil { - // Rewound state missing, rolled back to before pivot, reset to genesis - newHeadBlock = bc.genesisBlock + // Block exists, keep rewinding until we find one with state + for { + if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil { + log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) + if pivot == nil || newHeadBlock.NumberU64() > *pivot { + newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) + continue + } else { + log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot) + newHeadBlock = bc.genesisBlock + } + } + log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) + break } } rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash()) @@ -462,7 +482,6 @@ func (bc *BlockChain) SetHead(head uint64) error { bc.currentBlock.Store(newHeadBlock) headBlockGauge.Update(int64(newHeadBlock.NumberU64())) } - // Rewind the fast block in a simpleton way to the target head if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() { newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64()) @@ -479,8 +498,17 @@ func (bc *BlockChain) SetHead(head uint64) error { bc.currentFastBlock.Store(newHeadFastBlock) headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64())) } - } + head := bc.CurrentBlock().NumberU64() + // If setHead underflown the freezer threshold and the block processing + // intent afterwards is full block importing, delete the chain segment + // between the stateful-block and the sethead target. 
+ var wipe bool + if head+1 < frozen { + wipe = pivot == nil || head >= *pivot + } + return head, wipe // Only force wipe if full synced + } // Rewind the header chain, deleting all block bodies until then delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) { // Ignore the error here since light client won't hit this path @@ -488,10 +516,9 @@ func (bc *BlockChain) SetHead(head uint64) error { if num+1 <= frozen { // Truncate all relative data(header, total difficulty, body, receipt // and canonical hash) from ancient store. - if err := bc.db.TruncateAncients(num + 1); err != nil { + if err := bc.db.TruncateAncients(num); err != nil { log.Crit("Failed to truncate ancient data", "number", num, "err", err) } - // Remove the hash <-> number mapping from the active store. rawdb.DeleteHeaderNumber(db, hash) } else { @@ -503,8 +530,18 @@ func (bc *BlockChain) SetHead(head uint64) error { } // Todo(rjl493456442) txlookup, bloombits, etc } - bc.hc.SetHead(head, updateFn, delFn) - + // If SetHead was only called as a chain reparation method, try to skip + // touching the header chain altogether, unless the freezer is broken + if block := bc.CurrentBlock(); block.NumberU64() == head { + if target, force := updateFn(bc.db, block.Header()); force { + bc.hc.SetHead(target, updateFn, delFn) + } + } else { + // Rewind the chain to the requested head and keep going backwards until a + // block with a state is found or fast sync pivot is passed + log.Warn("Rewinding blockchain", "target", head) + bc.hc.SetHead(head, updateFn, delFn) + } // Clear out any stale content from the caches bc.bodyCache.Purge() bc.bodyRLPCache.Purge() @@ -627,28 +664,6 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { return nil } -// repair tries to repair the current blockchain by rolling back the current block -// until one with associated state is found. This is needed to fix incomplete db -// writes caused either by crashes/power outages, or simply non-committed tries. -// -// This method only rolls back the current block. The current header and current -// fast block are left intact. -func (bc *BlockChain) repair(head **types.Block) error { - for { - // Abort if we've rewound to a head block that does have associated state - if _, err := state.New((*head).Root(), bc.stateCache, bc.snaps); err == nil { - log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash()) - return nil - } - // Otherwise rewind one block and recheck state availability there - block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1) - if block == nil { - return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash()) - } - *head = block - } -} - // Export writes the active chain to the given writer. func (bc *BlockChain) Export(w io.Writer) error { return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64()) @@ -985,52 +1000,6 @@ const ( SideStatTy ) -// Rollback is designed to remove a chain of links from the database that aren't -// certain enough to be valid. -func (bc *BlockChain) Rollback(chain []common.Hash) { - bc.chainmu.Lock() - defer bc.chainmu.Unlock() - - batch := bc.db.NewBatch() - for i := len(chain) - 1; i >= 0; i-- { - hash := chain[i] - - // Degrade the chain markers if they are explicitly reverted. - // In theory we should update all in-memory markers in the - // last step, however the direction of rollback is from high - // to low, so it's safe the update in-memory markers directly. 
- currentHeader := bc.hc.CurrentHeader() - if currentHeader.Hash() == hash { - newHeadHeader := bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1) - rawdb.WriteHeadHeaderHash(batch, currentHeader.ParentHash) - bc.hc.SetCurrentHeader(newHeadHeader) - } - if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash { - newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1) - rawdb.WriteHeadFastBlockHash(batch, currentFastBlock.ParentHash()) - bc.currentFastBlock.Store(newFastBlock) - headFastBlockGauge.Update(int64(newFastBlock.NumberU64())) - } - if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash { - newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1) - rawdb.WriteHeadBlockHash(batch, currentBlock.ParentHash()) - bc.currentBlock.Store(newBlock) - headBlockGauge.Update(int64(newBlock.NumberU64())) - } - } - if err := batch.Write(); err != nil { - log.Crit("Failed to rollback chain markers", "err", err) - } - // Truncate ancient data which exceeds the current header. - // - // Notably, it can happen that system crashes without truncating the ancient data - // but the head indicator has been updated in the active store. Regarding this issue, - // system will self recovery by truncating the extra data during the setup phase. - if err := bc.truncateAncient(bc.hc.CurrentHeader().Number.Uint64()); err != nil { - log.Crit("Truncate ancient store failed", "err", err) - } -} - // truncateAncient rewinds the blockchain to the specified header and deletes all // data in the ancient store that exceeds the specified header. func (bc *BlockChain) truncateAncient(head uint64) error { diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go new file mode 100644 index 0000000000..27903dd06b --- /dev/null +++ b/core/blockchain_repair_test.go @@ -0,0 +1,1653 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Tests that abnormal program termination (i.e.crash) and restart doesn't leave +// the database in some strange state with gaps in the chain, nor with block data +// dangling in the future. + +package core + +import ( + "io/ioutil" + "math/big" + "os" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" +) + +// Tests a recovery for a short canonical chain where a recent block was already +// committed to disk and then the process crashed. In this case we expect the full +// chain to be rolled back to the committed block, but the chain data itself left +// in the database for replaying. 
+func TestShortRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // + // Frozen: none + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 8, + expSidechainBlocks: 0, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain where the fast sync pivot point was +// already committed, after which the process crashed. In this case we expect the full +// chain to be rolled back to the committed block, but the chain data itself left in +// the database for replaying. +func TestShortFastSyncedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 0, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain where the fast sync pivot point was +// not yet committed, but the process crashed. In this case we expect the chain to +// detect that it was fast syncing and not delete anything, since we can just pick +// up directly where we left off. +func TestShortFastSyncingRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 0, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a short canonical chain and a shorter side chain, where a +// recent block was already committed to disk and then the process crashed. In this +// test scenario the side chain is below the committed block. In this case we expect +// the canonical chain to be rolled back to the committed block, but the chain data +// itself left in the database for replaying. 
+func TestShortOldForkedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3 + // + // Frozen: none + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 8, + expSidechainBlocks: 3, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was already committed to disk and then the process +// crashed. In this test scenario the side chain is below the committed block. In +// this case we expect the canonical chain to be rolled back to the committed block, +// but the chain data itself left in the database for replaying. +func TestShortOldForkedFastSyncedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3 + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 3, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was not yet committed, but the process crashed. In this +// test scenario the side chain is below the committed block. In this case we expect +// the chain to detect that it was fast syncing and not delete anything, since we +// can just pick up directly where we left off. +func TestShortOldForkedFastSyncingRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3 + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 3, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a short canonical chain and a shorter side chain, where a +// recent block was already committed to disk and then the process crashed. In this +// test scenario the side chain reaches above the committed block. In this case we +// expect the canonical chain to be rolled back to the committed block, but the +// chain data itself left in the database for replaying. 
+func TestShortNewlyForkedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6 + // + // Frozen: none + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3->S4->S5->S6 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 6, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 8, + expSidechainBlocks: 6, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was already committed to disk and then the process +// crashed. In this test scenario the side chain reaches above the committed block. +// In this case we expect the canonical chain to be rolled back to the committed +// block, but the chain data itself left in the database for replaying. +func TestShortNewlyForkedFastSyncedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6 + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3->S4->S5->S6 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 6, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 6, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was not yet committed, but the process crashed. In +// this test scenario the side chain reaches above the committed block. In this +// case we expect the chain to detect that it was fast syncing and not delete +// anything, since we can just pick up directly where we left off. +func TestShortNewlyForkedFastSyncingRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6 + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3->S4->S5->S6 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 6, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 6, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a short canonical chain and a longer side chain, where a +// recent block was already committed to disk and then the process crashed. In this +// case we expect the canonical chain to be rolled back to the committed block, but +// the chain data itself left in the database for replaying. 
+func TestShortReorgedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Frozen: none + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 10, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 8, + expSidechainBlocks: 10, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain and a longer side chain, where +// the fast sync pivot point was already committed to disk and then the process +// crashed. In this case we expect the canonical chain to be rolled back to the +// committed block, but the chain data itself left in the database for replaying. +func TestShortReorgedFastSyncedRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 10, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 10, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a short canonical chain and a longer side chain, where +// the fast sync pivot point was not yet committed, but the process crashed. In +// this case we expect the chain to detect that it was fast syncing and not delete +// anything, since we can just pick up directly where we left off. +func TestShortReorgedFastSyncingRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Expected head header : C8 + // Expected head fast block: C8 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 10, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 8, + expSidechainBlocks: 10, + expFrozen: 0, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks where a recent +// block - newer than the ancient limit - was already committed to disk and then +// the process crashed. In this case we expect the chain to be rolled back to the +// committed block, with everything afterwads kept as fast sync data. 
+func TestLongShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks where a recent +// block - older than the ancient limit - was already committed to disk and then +// the process crashed. In this case we expect the chain to be rolled back to the +// committed block, with everything afterwads deleted. +func TestLongDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks where the fast +// sync pivot point - newer than the ancient limit - was already committed, after +// which the process crashed. In this case we expect the chain to be rolled back +// to the committed block, with everything afterwads kept as fast sync data. +func TestLongFastSyncedShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks where the fast +// sync pivot point - older than the ancient limit - was already committed, after +// which the process crashed. In this case we expect the chain to be rolled back +// to the committed block, with everything afterwads deleted. 
+func TestLongFastSyncedDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where the fast
+// sync pivot point - newer than the ancient limit - was not yet committed, but the
+// process crashed. In this case we expect the chain to detect that it was fast
+// syncing and not delete anything, since we can just pick up directly where we
+// left off.
+func TestLongFastSyncingShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where the fast
+// sync pivot point - older than the ancient limit - was not yet committed, but the
+// process crashed. In this case we expect the chain to detect that it was fast
+// syncing and not delete anything, since we can just pick up directly where we
+// left off.
+func TestLongFastSyncingDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected in leveldb:
+	//   C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
+	//
+	// Expected head header    : C24
+	// Expected head fast block: C24
+	// Expected head block     : G
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    0,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 24,
+		expSidechainBlocks: 0,
+		expFrozen:          9,
+		expHeadHeader:      24,
+		expHeadFastBlock:   24,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - newer than the ancient limit - was already
+// committed to disk and then the process crashed. In this test scenario the side
+// chain is below the committed block.
In this case we expect the chain to be +// rolled back to the committed block, with everything afterwads kept as fast +// sync data; the side chain completely nuked by the freezer. +func TestLongOldForkedShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where a recent block - older than the ancient limit - was already +// committed to disk and then the process crashed. In this test scenario the side +// chain is below the committed block. In this case we expect the canonical chain +// to be rolled back to the committed block, with everything afterwads deleted; +// the side chain completely nuked by the freezer. +func TestLongOldForkedDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was already committed to disk and then the process crashed. In this test scenario +// the side chain is below the committed block. In this case we expect the chain +// to be rolled back to the committed block, with everything afterwads kept as +// fast sync data; the side chain completely nuked by the freezer. 
+func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then the process crashed. In this test scenario
+// the side chain is below the committed block. In this case we expect the canonical
+// chain to be rolled back to the committed block, with everything afterwards deleted;
+// the side chain completely nuked by the freezer.
+func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but the process crashed. In this test scenario the side
+// chain is below the committed block. In this case we expect the chain to detect
+// that it was fast syncing and not delete anything. The side chain is completely
+// nuked by the freezer.
+func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2 + // + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - older than the ancient limit - +// was not yet committed, but the process crashed. In this test scenario the side +// chain is below the committed block. In this case we expect the chain to detect +// that it was fast syncing and not delete anything. The side chain is completely +// nuked by the freezer. +func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Expected in leveldb: + // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 + // + // Expected head header : C24 + // Expected head fast block: C24 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 24, + expSidechainBlocks: 0, + expFrozen: 9, + expHeadHeader: 24, + expHeadFastBlock: 24, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where a recent block - newer than the ancient limit - was already +// committed to disk and then the process crashed. In this test scenario the side +// chain is above the committed block. In this case we expect the chain to be +// rolled back to the committed block, with everything afterwads kept as fast +// sync data; the side chain completely nuked by the freezer. 
+func TestLongNewerForkedShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where a recent block - older than the ancient limit - was already +// committed to disk and then the process crashed. In this test scenario the side +// chain is above the committed block. In this case we expect the canonical chain +// to be rolled back to the committed block, with everything afterwads deleted; +// the side chain completely nuked by the freezer. +func TestLongNewerForkedDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was already committed to disk and then the process crashed. In this test scenario +// the side chain is above the committed block. In this case we expect the chain +// to be rolled back to the committed block, with everything afterwads kept as fast +// sync data; the side chain completely nuked by the freezer. 
+func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2
+	//
+	// Expected in leveldb:
+	//   C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+	//
+	// Expected head header    : C18
+	// Expected head fast block: C18
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    18,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 18,
+		expSidechainBlocks: 0,
+		expFrozen:          3,
+		expHeadHeader:      18,
+		expHeadFastBlock:   18,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then the process crashed. In this test scenario
+// the side chain is above the committed block. In this case we expect the canonical
+// chain to be rolled back to the committed block, with everything afterwards deleted;
+// the side chain completely nuked by the freezer.
+func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+	//
+	// Frozen:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Commit: G, C4
+	// Pivot : C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in freezer:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected in leveldb: none
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	testRepair(t, &rewindTest{
+		canonicalBlocks:    24,
+		sidechainBlocks:    12,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         uint64ptr(4),
+		expCanonicalBlocks: 4,
+		expSidechainBlocks: 0,
+		expFrozen:          5,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but the process crashed. In this test scenario the side
+// chain is above the committed block. In this case we expect the chain to detect
+// that it was fast syncing and not delete anything. The side chain is completely
+// nuked by the freezer.
+func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2 + // + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - older than the ancient limit - +// was not yet committed, but the process crashed. In this test scenario the side +// chain is above the committed block. In this case we expect the chain to detect +// that it was fast syncing and not delete anything. The side chain is completely +// nuked by the freezer. +func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Expected in leveldb: + // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 + // + // Expected head header : C24 + // Expected head fast block: C24 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 24, + expSidechainBlocks: 0, + expFrozen: 9, + expHeadHeader: 24, + expHeadFastBlock: 24, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a longer side +// chain, where a recent block - newer than the ancient limit - was already committed +// to disk and then the process crashed. In this case we expect the chain to be +// rolled back to the committed block, with everything afterwads kept as fast sync +// data. The side chain completely nuked by the freezer. 
+func TestLongReorgedShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a longer side +// chain, where a recent block - older than the ancient limit - was already committed +// to disk and then the process crashed. In this case we expect the canonical chains +// to be rolled back to the committed block, with everything afterwads deleted. The +// side chain completely nuked by the freezer. +func TestLongReorgedDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was already committed to disk and then the process crashed. In this case we +// expect the chain to be rolled back to the committed block, with everything +// afterwads kept as fast sync data. The side chain completely nuked by the +// freezer. 
+func TestLongReorgedFastSyncedShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - older than the ancient limit - +// was already committed to disk and then the process crashed. In this case we +// expect the canonical chains to be rolled back to the committed block, with +// everything afterwads deleted. The side chain completely nuked by the freezer. +func TestLongReorgedFastSyncedDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was not yet committed, but the process crashed. In this case we expect the +// chain to detect that it was fast syncing and not delete anything, since we +// can just pick up directly where we left off. 
+func TestLongReorgedFastSyncingShallowRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2 + // + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 + // + // Expected head header : C18 + // Expected head fast block: C18 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 18, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 18, + expHeadFastBlock: 18, + expHeadBlock: 0, + }) +} + +// Tests a recovery for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - older than the ancient limit - +// was not yet committed, but the process crashed. In this case we expect the +// chain to detect that it was fast syncing and not delete anything, since we +// can just pick up directly where we left off. +func TestLongReorgedFastSyncingDeepRepair(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // CRASH + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Expected in leveldb: + // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 + // + // Expected head header : C24 + // Expected head fast block: C24 + // Expected head block : G + testRepair(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + expCanonicalBlocks: 24, + expSidechainBlocks: 0, + expFrozen: 9, + expHeadHeader: 24, + expHeadFastBlock: 24, + expHeadBlock: 0, + }) +} + +func testRepair(t *testing.T, tt *rewindTest) { + // It's hard to follow the test case, visualize the input + //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + //fmt.Println(tt.dump(true)) + + // Create a temporary persistent database + datadir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("Failed to create temporary datadir: %v", err) + } + os.RemoveAll(datadir) + + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") + if err != nil { + t.Fatalf("Failed to create persistent database: %v", err) + } + defer db.Close() // Might double close, should be fine + + // Initialize a fresh chain + var ( + genesis = new(Genesis).MustCommit(db) + engine = ethash.NewFullFaker() + ) + chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to create chain: %v", err) + } + // If sidechain blocks are needed, make a light chain and import it + var sideblocks types.Blocks + if tt.sidechainBlocks > 0 { + sideblocks, _ = GenerateChain(params.TestChainConfig, genesis, engine, 
rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) { + b.SetCoinbase(common.Address{0x01}) + }) + if _, err := chain.InsertChain(sideblocks); err != nil { + t.Fatalf("Failed to import side chain: %v", err) + } + } + canonblocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) { + b.SetCoinbase(common.Address{0x02}) + b.SetDifficulty(big.NewInt(1000000)) + }) + if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil { + t.Fatalf("Failed to import canonical chain start: %v", err) + } + if tt.commitBlock > 0 { + chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil) + } + if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil { + t.Fatalf("Failed to import canonical chain tail: %v", err) + } + // Force run a freeze cycle + type freezer interface { + Freeze(threshold uint64) + Ancients() (uint64, error) + } + db.(freezer).Freeze(tt.freezeThreshold) + + // Set the simulated pivot block + if tt.pivotBlock != nil { + rawdb.WriteLastPivotNumber(db, *tt.pivotBlock) + } + // Pull the plug on the database, simulating a hard crash + db.Close() + + // Start a new blockchain back up and see where the repait leads us + db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") + if err != nil { + t.Fatalf("Failed to reopen persistent database: %v", err) + } + defer db.Close() + + chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to recreate chain: %v", err) + } + defer chain.Stop() + + // Iterate over all the remaining blocks and ensure there are no gaps + verifyNoGaps(t, chain, true, canonblocks) + verifyNoGaps(t, chain, false, sideblocks) + verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks) + verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks) + + if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader { + t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader) + } + if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock { + t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock) + } + if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock { + t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock) + } + if frozen, err := db.(freezer).Ancients(); err != nil { + t.Errorf("Failed to retrieve ancient count: %v\n", err) + } else if int(frozen) != tt.expFrozen { + t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen) + } +} diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go new file mode 100644 index 0000000000..dc1368ff4b --- /dev/null +++ b/core/blockchain_sethead_test.go @@ -0,0 +1,1949 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Tests that setting the chain head backwards doesn't leave the database in some +// strange state with gaps in the chain, nor with block data dangling in the future. + +package core + +import ( + "fmt" + "io/ioutil" + "math/big" + "os" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" +) + +// rewindTest is a test case for chain rollback upon user request. +type rewindTest struct { + canonicalBlocks int // Number of blocks to generate for the canonical chain (heavier) + sidechainBlocks int // Number of blocks to generate for the side chain (lighter) + freezeThreshold uint64 // Block number until which to move things into the freezer + commitBlock uint64 // Block number for which to commit the state to disk + pivotBlock *uint64 // Pivot block number in case of fast sync + + setheadBlock uint64 // Block number to set head back to + expCanonicalBlocks int // Number of canonical blocks expected to remain in the database (excl. genesis) + expSidechainBlocks int // Number of sidechain blocks expected to remain in the database (excl. genesis) + expFrozen int // Number of canonical blocks expected to be in the freezer (incl. genesis) + expHeadHeader uint64 // Block number of the expected head header + expHeadFastBlock uint64 // Block number of the expected head fast sync block + expHeadBlock uint64 // Block number of the expected head full block +} + +func (tt *rewindTest) dump(crash bool) string { + buffer := new(strings.Builder) + + fmt.Fprint(buffer, "Chain:\n G") + for i := 0; i < tt.canonicalBlocks; i++ { + fmt.Fprintf(buffer, "->C%d", i+1) + } + fmt.Fprint(buffer, " (HEAD)\n") + if tt.sidechainBlocks > 0 { + fmt.Fprintf(buffer, " └") + for i := 0; i < tt.sidechainBlocks; i++ { + fmt.Fprintf(buffer, "->S%d", i+1) + } + fmt.Fprintf(buffer, "\n") + } + fmt.Fprintf(buffer, "\n") + + if tt.canonicalBlocks > int(tt.freezeThreshold) { + fmt.Fprint(buffer, "Frozen:\n G") + for i := 0; i < tt.canonicalBlocks-int(tt.freezeThreshold); i++ { + fmt.Fprintf(buffer, "->C%d", i+1) + } + fmt.Fprintf(buffer, "\n\n") + } else { + fmt.Fprintf(buffer, "Frozen: none\n") + } + fmt.Fprintf(buffer, "Commit: G") + if tt.commitBlock > 0 { + fmt.Fprintf(buffer, ", C%d", tt.commitBlock) + } + fmt.Fprint(buffer, "\n") + + if tt.pivotBlock == nil { + fmt.Fprintf(buffer, "Pivot : none\n") + } else { + fmt.Fprintf(buffer, "Pivot : C%d\n", *tt.pivotBlock) + } + if crash { + fmt.Fprintf(buffer, "\nCRASH\n\n") + } else { + fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", tt.setheadBlock) + } + fmt.Fprintf(buffer, "------------------------------\n\n") + + if tt.expFrozen > 0 { + fmt.Fprint(buffer, "Expected in freezer:\n G") + for i := 0; i < tt.expFrozen-1; i++ { + fmt.Fprintf(buffer, "->C%d", i+1) + } + fmt.Fprintf(buffer, "\n\n") + } + if tt.expFrozen > 0 { + if tt.expFrozen >= tt.expCanonicalBlocks { + fmt.Fprintf(buffer, "Expected in leveldb: none\n") + } else { + fmt.Fprintf(buffer, "Expected in leveldb:\n C%d)", tt.expFrozen-1) + for i := tt.expFrozen - 1; i < tt.expCanonicalBlocks; i++ { + fmt.Fprintf(buffer, "->C%d", i+1) + } + fmt.Fprint(buffer, "\n") + if tt.expSidechainBlocks 
> tt.expFrozen { + fmt.Fprintf(buffer, " └") + for i := tt.expFrozen - 1; i < tt.expSidechainBlocks; i++ { + fmt.Fprintf(buffer, "->S%d", i+1) + } + fmt.Fprintf(buffer, "\n") + } + } + } else { + fmt.Fprint(buffer, "Expected in leveldb:\n G") + for i := tt.expFrozen; i < tt.expCanonicalBlocks; i++ { + fmt.Fprintf(buffer, "->C%d", i+1) + } + fmt.Fprint(buffer, "\n") + if tt.expSidechainBlocks > tt.expFrozen { + fmt.Fprintf(buffer, " └") + for i := tt.expFrozen; i < tt.expSidechainBlocks; i++ { + fmt.Fprintf(buffer, "->S%d", i+1) + } + fmt.Fprintf(buffer, "\n") + } + } + fmt.Fprintf(buffer, "\n") + fmt.Fprintf(buffer, "Expected head header : C%d\n", tt.expHeadHeader) + fmt.Fprintf(buffer, "Expected head fast block: C%d\n", tt.expHeadFastBlock) + if tt.expHeadBlock == 0 { + fmt.Fprintf(buffer, "Expected head block : G\n") + } else { + fmt.Fprintf(buffer, "Expected head block : C%d\n", tt.expHeadBlock) + } + return buffer.String() +} + +// Tests a sethead for a short canonical chain where a recent block was already +// committed to disk and then the sethead called. In this case we expect the full +// chain to be rolled back to the committed block. Everything above the sethead +// point should be deleted. In between the committed block and the requested head +// the data can remain as "fast sync" data to avoid redownloading it. +func TestShortSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // + // Frozen: none + // Commit: G, C4 + // Pivot : none + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 0, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a short canonical chain where the fast sync pivot point was +// already committed, after which sethead was called. In this case we expect the +// chain to behave like in full sync mode, rolling back to the committed block +// Everything above the sethead point should be deleted. In between the committed +// block and the requested head the data can remain as "fast sync" data to avoid +// redownloading it. +func TestShortFastSyncedSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 0, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a short canonical chain where the fast sync pivot point was +// not yet committed, but sethead was called. In this case we expect the chain to +// detect that it was fast syncing and delete everything from the new head, since +// we can just pick up fast syncing from there. The head full block should be set +// to the genesis. 
+func TestShortFastSyncingSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 0, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a short canonical chain and a shorter side chain, where a +// recent block was already committed to disk and then sethead was called. In this +// test scenario the side chain is below the committed block. In this case we expect +// the canonical full chain to be rolled back to the committed block. Everything +// above the sethead point should be deleted. In between the committed block and +// the requested head the data can remain as "fast sync" data to avoid redownloading +// it. The side chain should be left alone as it was shorter. +func TestShortOldForkedSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3 + // + // Frozen: none + // Commit: G, C4 + // Pivot : none + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 3, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was already committed to disk and then sethead was +// called. In this test scenario the side chain is below the committed block. In +// this case we expect the canonical full chain to be rolled back to the committed +// block. Everything above the sethead point should be deleted. In between the +// committed block and the requested head the data can remain as "fast sync" data +// to avoid redownloading it. The side chain should be left alone as it was shorter. +func TestShortOldForkedFastSyncedSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3 + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 3, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was not yet committed, but sethead was called. In this +// test scenario the side chain is below the committed block. 
In this case we expect
+// the chain to detect that it was fast syncing and delete everything from the new
+// head, since we can just pick up fast syncing from there. The head full block
+// should be set to the genesis.
+func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//   └->S1->S2->S3
+	//
+	// Frozen: none
+	// Commit: G
+	// Pivot : C4
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//   └->S1->S2->S3
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : G
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    8,
+		sidechainBlocks:    3,
+		freezeThreshold:    16,
+		commitBlock:        0,
+		pivotBlock:         uint64ptr(4),
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 3,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       0,
+	})
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then sethead was called. In this
+// test scenario the side chain reaches above the committed block. In this case we
+// expect the canonical full chain to be rolled back to the committed block. All
+// data above the sethead point should be deleted. In between the committed block
+// and the requested head the data can remain as "fast sync" data to avoid having
+// to redownload it. The side chain should be truncated to the head set.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortNewlyForkedSetHead(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
+	//   └->S1->S2->S3->S4->S5->S6->S7->S8
+	//
+	// Frozen: none
+	// Commit: G, C4
+	// Pivot : none
+	//
+	// SetHead(7)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7
+	//   └->S1->S2->S3->S4->S5->S6->S7
+	//
+	// Expected head header    : C7
+	// Expected head fast block: C7
+	// Expected head block     : C4
+	testSetHead(t, &rewindTest{
+		canonicalBlocks:    10,
+		sidechainBlocks:    8,
+		freezeThreshold:    16,
+		commitBlock:        4,
+		pivotBlock:         nil,
+		setheadBlock:       7,
+		expCanonicalBlocks: 7,
+		expSidechainBlocks: 7,
+		expFrozen:          0,
+		expHeadHeader:      7,
+		expHeadFastBlock:   7,
+		expHeadBlock:       4,
+	})
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then sethead was
+// called. In this case we expect the canonical full chain to be rolled back to
+// the committed block. All data above the sethead point should be deleted. In
+// between the committed block and the requested head the data can remain as
+// "fast sync" data to avoid having to redownload it. The side chain should be
+// truncated to the head set.
+//
+// The side chain could be left to be if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8 + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3->S4->S5->S6->S7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 10, + sidechainBlocks: 8, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 7, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a short canonical chain and a shorter side chain, where +// the fast sync pivot point was not yet committed, but sethead was called. In +// this test scenario the side chain reaches above the committed block. In this +// case we expect the chain to detect that it was fast syncing and delete +// everything from the new head, since we can just pick up fast syncing from +// there. +// +// The side chain could be left to be if the fork point was before the new head +// we are deleting to, but it would be exceedingly hard to detect that case and +// properly handle it, so we'll trade extra work in exchange for simpler code. +func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8 + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3->S4->S5->S6->S7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 10, + sidechainBlocks: 8, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 7, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a short canonical chain and a longer side chain, where a +// recent block was already committed to disk and then sethead was called. In this +// case we expect the canonical full chain to be rolled back to the committed block. +// All data above the sethead point should be deleted. In between the committed +// block and the requested head the data can remain as "fast sync" data to avoid +// having to redownload it. The side chain should be truncated to the head set. +// +// The side chain could be left to be if the fork point was before the new head +// we are deleting to, but it would be exceedingly hard to detect that case and +// properly handle it, so we'll trade extra work in exchange for simpler code. 
+func TestShortReorgedSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Frozen: none + // Commit: G, C4 + // Pivot : none + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3->S4->S5->S6->S7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 10, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 7, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a short canonical chain and a longer side chain, where +// the fast sync pivot point was already committed to disk and then sethead was +// called. In this case we expect the canonical full chain to be rolled back to +// the committed block. All data above the sethead point should be deleted. In +// between the committed block and the requested head the data can remain as +// "fast sync" data to avoid having to redownload it. The side chain should be +// truncated to the head set. +// +// The side chain could be left to be if the fork point was before the new head +// we are deleting to, but it would be exceedingly hard to detect that case and +// properly handle it, so we'll trade extra work in exchange for simpler code. +func TestShortReorgedFastSyncedSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Frozen: none + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3->S4->S5->S6->S7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 10, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 7, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a short canonical chain and a longer side chain, where +// the fast sync pivot point was not yet committed, but sethead was called. In +// this case we expect the chain to detect that it was fast syncing and delete +// everything from the new head, since we can just pick up fast syncing from +// there. +// +// The side chain could be left to be if the fork point was before the new head +// we are deleting to, but it would be exceedingly hard to detect that case and +// properly handle it, so we'll trade extra work in exchange for simpler code. 
+func TestShortReorgedFastSyncingSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 + // + // Frozen: none + // Commit: G + // Pivot : C4 + // + // SetHead(7) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7 + // └->S1->S2->S3->S4->S5->S6->S7 + // + // Expected head header : C7 + // Expected head fast block: C7 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 8, + sidechainBlocks: 10, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 7, + expCanonicalBlocks: 7, + expSidechainBlocks: 7, + expFrozen: 0, + expHeadHeader: 7, + expHeadFastBlock: 7, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks where a recent +// block - newer than the ancient limit - was already committed to disk and then +// sethead was called. In this case we expect the full chain to be rolled back +// to the committed block. Everything above the sethead point should be deleted. +// In between the committed block and the requested head the data can remain as +// "fast sync" data to avoid redownloading it. +func TestLongShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks where a recent +// block - older than the ancient limit - was already committed to disk and then +// sethead was called. In this case we expect the full chain to be rolled back +// to the committed block. Since the ancient limit was underflown, everything +// needs to be deleted onwards to avoid creating a gap. +func TestLongDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks where the fast +// sync pivot point - newer than the ancient limit - was already committed, after +// which sethead was called. In this case we expect the full chain to be rolled +// back to the committed block. Everything above the sethead point should be +// deleted. 
In between the committed block and the requested head the data can +// remain as "fast sync" data to avoid redownloading it. +func TestLongFastSyncedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks where the fast +// sync pivot point - older than the ancient limit - was already committed, after +// which sethead was called. In this case we expect the full chain to be rolled +// back to the committed block. Since the ancient limit was underflown, everything +// needs to be deleted onwards to avoid creating a gap. +func TestLongFastSyncedDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks where the fast +// sync pivot point - newer than the ancient limit - was not yet committed, but +// sethead was called. In this case we expect the chain to detect that it was fast +// syncing and delete everything from the new head, since we can just pick up fast +// syncing from there. +func TestLongFastSyncingShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // + // Frozen: + // G->C1->C2 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks where the fast +// sync pivot point - older than the ancient limit - was not yet committed, but +// sethead was called. 
In this case we expect the chain to detect that it was fast +// syncing and delete everything from the new head, since we can just pick up fast +// syncing from there. +func TestLongFastSyncingDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6 + // + // Expected in leveldb: none + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 0, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 7, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter side +// chain, where a recent block - newer than the ancient limit - was already committed +// to disk and then sethead was called. In this case we expect the canonical full +// chain to be rolled back to the committed block. Everything above the sethead point +// should be deleted. In between the committed block and the requested head the data +// can remain as "fast sync" data to avoid redownloading it. The side chain is nuked +// by the freezer. +func TestLongOldForkedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter side +// chain, where a recent block - older than the ancient limit - was already committed +// to disk and then sethead was called. In this case we expect the canonical full +// chain to be rolled back to the committed block. Since the ancient limit was +// underflown, everything needs to be deleted onwards to avoid creating a gap. The +// side chain is nuked by the freezer. 
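// Illustrative sketch, not part of the patch: the "Frozen" rows in the
// scenarios above and below follow from the forced freeze cycle that
// testSetHead (further down in this file) runs before rewinding. With the
// freezer loop changed to `for f.frozen <= limit` in the core/rawdb/freezer.go
// hunk of this patch, the number of items migrated into the freezer is
// head - threshold + 1, genesis included. The helper name below is
// hypothetical and only restates that arithmetic:
func frozenBeforeSetHead(head, freezeThreshold uint64) uint64 {
	if head < freezeThreshold {
		return 0 // nothing is old enough to freeze yet
	}
	// e.g. head=18, threshold=16 -> 3 items (G, C1, C2); head=24 -> 9 items (G..C8)
	return head - freezeThreshold + 1
}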
+func TestLongOldForkedDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was already committed to disk and then sethead was called. In this test scenario +// the side chain is below the committed block. In this case we expect the canonical +// full chain to be rolled back to the committed block. Everything above the +// sethead point should be deleted. In between the committed block and the +// requested head the data can remain as "fast sync" data to avoid redownloading +// it. The side chain is nuked by the freezer. +func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - older than the ancient limit - +// was already committed to disk and then sethead was called. In this test scenario +// the side chain is below the committed block. In this case we expect the canonical +// full chain to be rolled back to the committed block. Since the ancient limit was +// underflown, everything needs to be deleted onwards to avoid creating a gap. The +// side chain is nuked by the freezer. 
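// Illustrative sketch, not part of the patch: "Shallow" versus "Deep" in these
// test names refers to whether the block the chain has to rewind to (the
// committed block, or the pivot) is still in the active database or already
// behind the ancient limit. A minimal restatement of that boundary, with a
// hypothetical helper name:
func isDeep(rewindTarget, head, freezeThreshold uint64) bool {
	if head < freezeThreshold {
		return false // nothing has been frozen yet
	}
	// e.g. target C4: shallow for head C18 (4 > 18-16), deep for head C24 (4 <= 24-16)
	return rewindTarget <= head-freezeThreshold
}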
+func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6 + // + // Expected in leveldb: none + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was not yet committed, but sethead was called. In this test scenario the side +// chain is below the committed block. In this case we expect the chain to detect +// that it was fast syncing and delete everything from the new head, since we can +// just pick up fast syncing from there. The side chain is completely nuked by the +// freezer. +func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - older than the ancient limit - +// was not yet committed, but sethead was called. In this test scenario the side +// chain is below the committed block. In this case we expect the chain to detect +// that it was fast syncing and delete everything from the new head, since we can +// just pick up fast syncing from there. The side chain is completely nuked by the +// freezer. 
+func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6 + // + // Expected in leveldb: none + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 3, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 7, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where a recent block - newer than the ancient limit - was already +// committed to disk and then sethead was called. In this test scenario the side +// chain is above the committed block. In this case the freezer will delete the +// sidechain since it's dangling, reverting to TestLongShallowSetHead. +func TestLongNewerForkedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where a recent block - older than the ancient limit - was already +// committed to disk and then sethead was called. In this test scenario the side +// chain is above the committed block. In this case the freezer will delete the +// sidechain since it's dangling, reverting to TestLongDeepSetHead. 
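// Illustrative sketch, not part of the patch: expSidechainBlocks drops to zero
// in these forked scenarios because of the dangling-child sweep added to the
// freezer (see the core/rawdb/freezer.go hunk later in this patch). Once the
// blocks at a frozen number are wiped from the active database, every
// descendant whose parent was wiped is deleted as well. A compressed sketch,
// with hypothetical callbacks standing in for the rawdb accessors:
func sweepDangling(tip uint64, dropped map[common.Hash]bool,
	hashesAt func(uint64) []common.Hash,
	parentOf func(common.Hash, uint64) common.Hash,
	deleteBlock func(common.Hash, uint64)) {
	for len(dropped) > 0 {
		next := make(map[common.Hash]bool)
		for _, hash := range hashesAt(tip) {
			if dropped[parentOf(hash, tip)] {
				deleteBlock(hash, tip) // child of a deleted block: delete it too
				next[hash] = true
			}
		}
		dropped, tip = next, tip+1
	}
}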
+func TestLongNewerForkedDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was already committed to disk and then sethead was called. In this test scenario +// the side chain is above the committed block. In this case the freezer will delete +// the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead. +func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - older than the ancient limit - +// was already committed to disk and then sethead was called. In this test scenario +// the side chain is above the committed block. In this case the freezer will delete +// the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead. 
+func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was not yet committed, but sethead was called. In this test scenario the side +// chain is above the committed block. In this case the freezer will delete the +// sidechain since it's dangling, reverting to TestLongFastSyncingShallowSetHead. +func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a shorter +// side chain, where the fast sync pivot point - older than the ancient limit - +// was not yet committed, but sethead was called. In this test scenario the side +// chain is above the committed block. In this case the freezer will delete the +// sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead.
+func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6 + // + // Expected in leveldb: none + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 12, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 7, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a longer side +// chain, where a recent block - newer than the ancient limit - was already committed +// to disk and then sethead was called. In this case the freezer will delete the +// sidechain since it's dangling, reverting to TestLongShallowSetHead. +func TestLongReorgedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a longer side +// chain, where a recent block - older than the ancient limit - was already committed +// to disk and then sethead was called. In this case the freezer will delete the +// sidechain since it's dangling, reverting to TestLongDeepSetHead. 
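// Illustrative sketch, not part of the patch: in the deep scenarios the
// expFrozen values are smaller than what the freeze cycle produced, because
// SetHead also truncates the ancient store down to the surviving header chain.
// The expectations above appear to follow a single rule; the helper name is
// hypothetical:
func frozenAfterSetHead(frozenBefore, expHeadHeader uint64) uint64 {
	if expHeadHeader+1 < frozenBefore {
		return expHeadHeader + 1 // deep case: the freezer is cut back to the new head
	}
	return frozenBefore // shallow case: the freezer is left untouched
}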
+func TestLongReorgedDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : none + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: nil, + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was already committed to disk and then sethead was called. In this case the +// freezer will delete the sidechain since it's dangling, reverting to +// TestLongFastSyncedShallowSetHead. +func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - older than the ancient limit - +// was already committed to disk and then sethead was called. In this case the +// freezer will delete the sidechain since it's dangling, reverting to +// TestLongFastSyncedDeepSetHead. 
+func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G, C4 + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4 + // + // Expected in leveldb: none + // + // Expected head header : C4 + // Expected head fast block: C4 + // Expected head block : C4 + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 4, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 4, + expSidechainBlocks: 0, + expFrozen: 5, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - newer than the ancient limit - +// was not yet committed, but sethead was called. In this case we expect the +// chain to detect that it was fast syncing and delete everything from the new +// head, since we can just pick up fast syncing from there. The side chain is +// completely nuked by the freezer. +func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2 + // + // Expected in leveldb: + // C2)->C3->C4->C5->C6 + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 18, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 3, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +// Tests a sethead for a long canonical chain with frozen blocks and a longer +// side chain, where the fast sync pivot point - older than the ancient limit - +// was not yet committed, but sethead was called. In this case we expect the +// chain to detect that it was fast syncing and delete everything from the new +// head, since we can just pick up fast syncing from there. The side chain is +// completely nuked by the freezer. 
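// Illustrative sketch, not part of the patch: across the scenarios above the
// expected full-chain head follows one rule - rewind to the last committed
// block, unless the fast sync pivot was never committed, in which case fall
// back to genesis so fast sync can simply restart. The helper name is
// hypothetical:
func expectedHeadBlock(commitBlock uint64, pivotBlock *uint64) uint64 {
	if pivotBlock != nil && commitBlock < *pivotBlock {
		return 0 // pivot not yet committed: pick fast sync up again from genesis
	}
	return commitBlock // otherwise the committed block survives the rewind
}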
+func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) + // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 + // + // Frozen: + // G->C1->C2->C3->C4->C5->C6->C7->C8 + // + // Commit: G + // Pivot : C4 + // + // SetHead(6) + // + // ------------------------------ + // + // Expected in freezer: + // G->C1->C2->C3->C4->C5->C6 + // + // Expected in leveldb: none + // + // Expected head header : C6 + // Expected head fast block: C6 + // Expected head block : G + testSetHead(t, &rewindTest{ + canonicalBlocks: 24, + sidechainBlocks: 26, + freezeThreshold: 16, + commitBlock: 0, + pivotBlock: uint64ptr(4), + setheadBlock: 6, + expCanonicalBlocks: 6, + expSidechainBlocks: 0, + expFrozen: 7, + expHeadHeader: 6, + expHeadFastBlock: 6, + expHeadBlock: 0, + }) +} + +func testSetHead(t *testing.T, tt *rewindTest) { + // It's hard to follow the test case, visualize the input + //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + //fmt.Println(tt.dump(false)) + + // Create a temporary persistent database + datadir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("Failed to create temporary datadir: %v", err) + } + os.RemoveAll(datadir) + + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") + if err != nil { + t.Fatalf("Failed to create persistent database: %v", err) + } + defer db.Close() + + // Initialize a fresh chain + var ( + genesis = new(Genesis).MustCommit(db) + engine = ethash.NewFullFaker() + ) + chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to create chain: %v", err) + } + // If sidechain blocks are needed, make a light chain and import it + var sideblocks types.Blocks + if tt.sidechainBlocks > 0 { + sideblocks, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) { + b.SetCoinbase(common.Address{0x01}) + }) + if _, err := chain.InsertChain(sideblocks); err != nil { + t.Fatalf("Failed to import side chain: %v", err) + } + } + canonblocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) { + b.SetCoinbase(common.Address{0x02}) + b.SetDifficulty(big.NewInt(1000000)) + }) + if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil { + t.Fatalf("Failed to import canonical chain start: %v", err) + } + if tt.commitBlock > 0 { + chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil) + } + if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil { + t.Fatalf("Failed to import canonical chain tail: %v", err) + } + // Manually dereference anything not committed to not have to work with 128+ tries + for _, block := range sideblocks { + chain.stateCache.TrieDB().Dereference(block.Root()) + } + for _, block := range canonblocks { + chain.stateCache.TrieDB().Dereference(block.Root()) + } + // Force run a freeze cycle + type freezer interface { + Freeze(threshold uint64) + Ancients() (uint64, error) + } + db.(freezer).Freeze(tt.freezeThreshold) + + // Set the simulated pivot block + if tt.pivotBlock != nil { + rawdb.WriteLastPivotNumber(db, *tt.pivotBlock) + } + // Set the head of the chain back 
to the requested number + chain.SetHead(tt.setheadBlock) + + // Iterate over all the remaining blocks and ensure there are no gaps + verifyNoGaps(t, chain, true, canonblocks) + verifyNoGaps(t, chain, false, sideblocks) + verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks) + verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks) + + if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader { + t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader) + } + if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock { + t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock) + } + if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock { + t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock) + } + if frozen, err := db.(freezer).Ancients(); err != nil { + t.Errorf("Failed to retrieve ancient count: %v\n", err) + } else if int(frozen) != tt.expFrozen { + t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen) + } +} + +// verifyNoGaps checks that there are no gaps after the initial set of blocks in +// the database and errors if found. +func verifyNoGaps(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks) { + t.Helper() + + var end uint64 + for i := uint64(0); i <= uint64(len(inserted)); i++ { + header := chain.GetHeaderByNumber(i) + if header == nil && end == 0 { + end = i + } + if header != nil && end > 0 { + if canonical { + t.Errorf("Canonical header gap between #%d-#%d", end, i-1) + } else { + t.Errorf("Sidechain header gap between #%d-#%d", end, i-1) + } + end = 0 // Reset for further gap detection + } + } + end = 0 + for i := uint64(0); i <= uint64(len(inserted)); i++ { + block := chain.GetBlockByNumber(i) + if block == nil && end == 0 { + end = i + } + if block != nil && end > 0 { + if canonical { + t.Errorf("Canonical block gap between #%d-#%d", end, i-1) + } else { + t.Errorf("Sidechain block gap between #%d-#%d", end, i-1) + } + end = 0 // Reset for further gap detection + } + } + end = 0 + for i := uint64(1); i <= uint64(len(inserted)); i++ { + receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()) + if receipts == nil && end == 0 { + end = i + } + if receipts != nil && end > 0 { + if canonical { + t.Errorf("Canonical receipt gap between #%d-#%d", end, i-1) + } else { + t.Errorf("Sidechain receipt gap between #%d-#%d", end, i-1) + } + end = 0 // Reset for further gap detection + } + } +} + +// verifyCutoff checks that there are no chain data available in the chain after +// the specified limit, but that it is available before. +func verifyCutoff(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks, head int) { + t.Helper() + + for i := 1; i <= len(inserted); i++ { + if i <= head { + if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header == nil { + if canonical { + t.Errorf("Canonical header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } else { + t.Errorf("Sidechain header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } + } + if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block == nil { + if canonical { + t.Errorf("Canonical block #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } else { + t.Errorf("Sidechain block #%2d [%x...] 
missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } + } + if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts == nil { + if canonical { + t.Errorf("Canonical receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } else { + t.Errorf("Sidechain receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } + } + } else { + if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header != nil { + if canonical { + t.Errorf("Canonical header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } else { + t.Errorf("Sidechain header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } + } + if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block != nil { + if canonical { + t.Errorf("Canonical block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } else { + t.Errorf("Sidechain block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } + } + if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts != nil { + if canonical { + t.Errorf("Canonical receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } else { + t.Errorf("Sidechain receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) + } + } + } + } +} + +// uint64ptr is a weird helper to allow 1-line constant pointer creation. +func uint64ptr(n uint64) *uint64 { + return &n +} diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 0d810699f6..41fc4920ca 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -731,12 +731,12 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { return db, func() { os.RemoveAll(dir) } } // Configure a subchain to roll back - remove := []common.Hash{} - for _, block := range blocks[height/2:] { - remove = append(remove, block.Hash()) - } + remove := blocks[height/2].NumberU64() + // Create a small assertion method to check the three heads assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) { + t.Helper() + if num := chain.CurrentBlock().NumberU64(); num != block { t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block) } @@ -750,14 +750,18 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { // Import the chain as an archive node and ensure all pointers are updated archiveDb, delfn := makeDb() defer delfn() - archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) + + archiveCaching := *defaultCacheConfig + archiveCaching.TrieDirtyDisabled = true + + archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) if n, err := archive.InsertChain(blocks); err != nil { t.Fatalf("failed to process block %d: %v", n, err) } defer archive.Stop() assert(t, "archive", archive, height, height, height) - archive.Rollback(remove) + archive.SetHead(remove - 1) assert(t, "archive", archive, height/2, height/2, height/2) // Import the chain as a non-archive node and ensure all pointers are updated @@ -777,7 +781,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { t.Fatalf("failed to insert receipt %d: %v", n, err) } assert(t, 
"fast", fast, height, height, 0) - fast.Rollback(remove) + fast.SetHead(remove - 1) assert(t, "fast", fast, height/2, height/2, 0) // Import the chain as a ancient-first node and ensure all pointers are updated @@ -793,12 +797,12 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { t.Fatalf("failed to insert receipt %d: %v", n, err) } assert(t, "ancient", ancient, height, height, 0) - ancient.Rollback(remove) - assert(t, "ancient", ancient, height/2, height/2, 0) - if frozen, err := ancientDb.Ancients(); err != nil || frozen != height/2+1 { - t.Fatalf("failed to truncate ancient store, want %v, have %v", height/2+1, frozen) - } + ancient.SetHead(remove - 1) + assert(t, "ancient", ancient, 0, 0, 0) + if frozen, err := ancientDb.Ancients(); err != nil || frozen != 1 { + t.Fatalf("failed to truncate ancient store, want %v, have %v", 1, frozen) + } // Import the chain as a light node and ensure all pointers are updated lightDb, delfn := makeDb() defer delfn() @@ -809,7 +813,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) { defer light.Stop() assert(t, "light", light, height, 0, 0) - light.Rollback(remove) + light.SetHead(remove - 1) assert(t, "light", light, height/2, 0, 0) } @@ -1585,6 +1589,7 @@ func TestBlockchainRecovery(t *testing.T) { t.Fatalf("failed to create temp freezer dir: %v", err) } defer os.Remove(frdir) + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "") if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) @@ -1602,6 +1607,7 @@ func TestBlockchainRecovery(t *testing.T) { if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil { t.Fatalf("failed to insert receipt %d: %v", n, err) } + rawdb.WriteLastPivotNumber(ancientDb, blocks[len(blocks)-1].NumberU64()) // Force fast sync behavior ancient.Stop() // Destroy head fast block manually @@ -1912,11 +1918,9 @@ func testInsertKnownChainData(t *testing.T, typ string) { asserter(t, blocks[len(blocks)-1]) // Import a long canonical chain with some known data as prefix. - var rollback []common.Hash - for i := len(blocks) / 2; i < len(blocks); i++ { - rollback = append(rollback, blocks[i].Hash()) - } - chain.Rollback(rollback) + rollback := blocks[len(blocks)/2].NumberU64() + + chain.SetHead(rollback - 1) if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil { t.Fatalf("failed to insert chain data: %v", err) } @@ -1936,11 +1940,7 @@ func testInsertKnownChainData(t *testing.T, typ string) { asserter(t, blocks3[len(blocks3)-1]) // Rollback the heavier chain and re-insert the longer chain again - for i := 0; i < len(blocks3); i++ { - rollback = append(rollback, blocks3[i].Hash()) - } - chain.Rollback(rollback) - + chain.SetHead(rollback - 1) if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil { t.Fatalf("failed to insert chain data: %v", err) } diff --git a/core/headerchain.go b/core/headerchain.go index a6028d8b9a..f5a8e21cfc 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -488,8 +488,10 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) { type ( // UpdateHeadBlocksCallback is a callback function that is called by SetHead - // before head header is updated. - UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) + // before head header is updated. 
The method will return the actual block it + // updated the head to (missing state) and a flag if setHead should continue + // rewinding till that forcefully (exceeded ancient limits) + UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (uint64, bool) // DeleteBlockContentCallback is a callback function that is called by SetHead // before each header is deleted. @@ -502,9 +504,10 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d var ( parentHash common.Hash batch = hc.chainDb.NewBatch() + origin = true ) for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() { - hash, num := hdr.Hash(), hdr.Number.Uint64() + num := hdr.Number.Uint64() // Rewind block chain to new head. parent := hc.GetHeader(hdr.ParentHash, num-1) @@ -512,16 +515,21 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d parent = hc.genesisHeader } parentHash = hdr.ParentHash + // Notably, since geth has the possibility for setting the head to a low // height which is even lower than ancient head. // In order to ensure that the head is always no higher than the data in - // the database(ancient store or active store), we need to update head + // the database (ancient store or active store), we need to update head // first then remove the relative data from the database. // // Update head first(head fast block, head full block) before deleting the data. markerBatch := hc.chainDb.NewBatch() if updateFn != nil { - updateFn(markerBatch, parent) + newHead, force := updateFn(markerBatch, parent) + if force && newHead < head { + log.Warn("Force rewinding till ancient limit", "head", newHead) + head = newHead + } } // Update head header then. rawdb.WriteHeadHeaderHash(markerBatch, parentHash) @@ -532,14 +540,34 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d hc.currentHeaderHash = parentHash headHeaderGauge.Update(parent.Number.Int64()) - // Remove the relative data from the database. - if delFn != nil { - delFn(batch, hash, num) + // If this is the first iteration, wipe any leftover data upwards too so + // we don't end up with dangling daps in the database + var nums []uint64 + if origin { + for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ { + nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path + } + origin = false + } + nums = append(nums, num) + + // Remove the related data from the database on all sidechains + for _, num := range nums { + // Gather all the side fork hashes + hashes := rawdb.ReadAllHashes(hc.chainDb, num) + if len(hashes) == 0 { + // No hashes in the database whatsoever, probably frozen already + hashes = append(hashes, hdr.Hash()) + } + for _, hash := range hashes { + if delFn != nil { + delFn(batch, hash, num) + } + rawdb.DeleteHeader(batch, hash, num) + rawdb.DeleteTd(batch, hash, num) + } + rawdb.DeleteCanonicalHash(batch, num) } - // Rewind header chain to new head. - rawdb.DeleteHeader(batch, hash, num) - rawdb.DeleteTd(batch, hash, num) - rawdb.DeleteCanonicalHash(batch, num) } // Flush all accumulated deletions. if err := batch.Write(); err != nil { diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 8dd1f6345a..c948cdc7c6 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -187,6 +187,32 @@ func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) { } } +// ReadLastPivotNumber retrieves the number of the last pivot block. 
If the node +// full synced, the last pivot will always be nil. +func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 { + data, _ := db.Get(lastPivotKey) + if len(data) == 0 { + return nil + } + var pivot uint64 + if err := rlp.DecodeBytes(data, &pivot); err != nil { + log.Error("Invalid pivot block number in database", "err", err) + return nil + } + return &pivot +} + +// WriteLastPivotNumber stores the number of the last pivot block. +func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) { + enc, err := rlp.EncodeToBytes(pivot) + if err != nil { + log.Crit("Failed to encode pivot block number", "err", err) + } + if err := db.Put(lastPivotKey, enc); err != nil { + log.Crit("Failed to store pivot block number", "err", err) + } +} + // ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow // reporting correct numbers across restarts. func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 { diff --git a/core/rawdb/database.go b/core/rawdb/database.go index eb3f86a76e..d22ca1c529 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "os" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -53,6 +54,22 @@ func (frdb *freezerdb) Close() error { return nil } +// Freeze is a helper method used for external testing to trigger and block until +// a freeze cycle completes, without having to sleep for a minute to trigger the +// automatic background run. +func (frdb *freezerdb) Freeze(threshold uint64) { + // Set the freezer threshold to a temporary value + defer func(old uint64) { + atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, old) + }(atomic.LoadUint64(&frdb.AncientStore.(*freezer).threshold)) + atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, threshold) + + // Trigger a freeze cycle and block until it's done + trigger := make(chan struct{}, 1) + frdb.AncientStore.(*freezer).trigger <- trigger + <-trigger +} + // nofreezedb is a database wrapper that disables freezer data retrievals. type nofreezedb struct { ethdb.KeyValueStore diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 621d35d3f4..5744b0cbb3 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -70,12 +70,16 @@ type freezer struct { // WARNING: The `frozen` field is accessed atomically. On 32 bit platforms, only // 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned, // so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG). 
- frozen uint64 // Number of blocks already frozen + frozen uint64 // Number of blocks already frozen + threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests) tables map[string]*freezerTable // Data tables for storing everything instanceLock fileutil.Releaser // File-system lock to prevent double opens - quit chan struct{} - closeOnce sync.Once + + trigger chan chan struct{} // Manual blocking freeze trigger, test determinism + + quit chan struct{} + closeOnce sync.Once } // newFreezer creates a chain freezer that moves ancient chain data into @@ -102,8 +106,10 @@ func newFreezer(datadir string, namespace string) (*freezer, error) { } // Open all the supported data tables freezer := &freezer{ + threshold: params.FullImmutabilityThreshold, tables: make(map[string]*freezerTable), instanceLock: lock, + trigger: make(chan chan struct{}), quit: make(chan struct{}), } for name, disableSnappy := range freezerNoSnappy { @@ -261,7 +267,10 @@ func (f *freezer) Sync() error { func (f *freezer) freeze(db ethdb.KeyValueStore) { nfdb := &nofreezedb{KeyValueStore: db} - backoff := false + var ( + backoff bool + triggered chan struct{} // Used in tests + ) for { select { case <-f.quit: @@ -270,9 +279,16 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) { default: } if backoff { + // If we were doing a manual trigger, notify it + if triggered != nil { + triggered <- struct{}{} + triggered = nil + } select { case <-time.NewTimer(freezerRecheckInterval).C: backoff = false + case triggered = <-f.trigger: + backoff = false case <-f.quit: return } @@ -285,18 +301,20 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) { continue } number := ReadHeaderNumber(nfdb, hash) + threshold := atomic.LoadUint64(&f.threshold) + switch { case number == nil: log.Error("Current full block number unavailable", "hash", hash) backoff = true continue - case *number < params.FullImmutabilityThreshold: - log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", params.FullImmutabilityThreshold) + case *number < threshold: + log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold) backoff = true continue - case *number-params.FullImmutabilityThreshold <= f.frozen: + case *number-threshold <= f.frozen: log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen) backoff = true continue @@ -308,7 +326,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) { continue } // Seems we have data ready to be frozen, process in usable batches - limit := *number - params.FullImmutabilityThreshold + limit := *number - threshold if limit-f.frozen > freezerBatchLimit { limit = f.frozen + freezerBatchLimit } @@ -317,7 +335,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) { first = f.frozen ancients = make([]common.Hash, 0, limit-f.frozen) ) - for f.frozen < limit { + for f.frozen <= limit { // Retrieves all the components of the canonical block hash := ReadCanonicalHash(nfdb, f.frozen) if hash == (common.Hash{}) { @@ -368,11 +386,15 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) { log.Crit("Failed to delete frozen canonical blocks", "err", err) } batch.Reset() - // Wipe out side chain also. 
+ + // Wipe out side chains also and track dangling side chains + var dangling []common.Hash for number := first; number < f.frozen; number++ { // Always keep the genesis block in active database if number != 0 { - for _, hash := range ReadAllHashes(db, number) { + dangling = ReadAllHashes(db, number) + for _, hash := range dangling { + log.Trace("Deleting side chain", "number", number, "hash", hash) DeleteBlock(batch, hash, number) } } @@ -380,6 +402,41 @@ if err := batch.Write(); err != nil { log.Crit("Failed to delete frozen side blocks", "err", err) } + batch.Reset() + + // Step into the future and delete any dangling side chains + if f.frozen > 0 { + tip := f.frozen + for len(dangling) > 0 { + drop := make(map[common.Hash]struct{}) + for _, hash := range dangling { + log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash) + drop[hash] = struct{}{} + } + children := ReadAllHashes(db, tip) + for i := 0; i < len(children); i++ { + // Dig up the child and ensure it's dangling + child := ReadHeader(nfdb, children[i], tip) + if child == nil { + log.Error("Missing dangling header", "number", tip, "hash", children[i]) + continue + } + if _, ok := drop[child.ParentHash]; !ok { + children = append(children[:i], children[i+1:]...) + i-- + continue + } + // Delete all block data associated with the child + log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash) + DeleteBlock(batch, children[i], tip) + } + dangling = children + tip++ + } + if err := batch.Write(); err != nil { + log.Crit("Failed to delete dangling side blocks", "err", err) + } + } // Log something friendly for the user context := []interface{}{ "blocks", f.frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen - 1, diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 5a41199a7c..b87e7888cc 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -38,6 +38,9 @@ var ( // headFastBlockKey tracks the latest known incomplete block's hash during fast sync. headFastBlockKey = []byte("LastFast") + // lastPivotKey tracks the last pivot block used by fast sync (to reenable on sethead). + lastPivotKey = []byte("LastPivot") + // fastTrieProgressKey tracks the number of trie entries imported during fast sync. fastTrieProgressKey = []byte("TrieSync") diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 8edcdc6222..83b757fa17 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -176,8 +176,8 @@ type LightChain interface { // InsertHeaderChain inserts a batch of headers into the local chain. InsertHeaderChain([]*types.Header, int) (int, error) - // Rollback removes a few recently added elements from the local chain. - Rollback([]common.Hash) + // SetHead rewinds the local chain to a new head. + SetHead(uint64) error } // BlockChain encapsulates functions required to sync a (full or fast) blockchain.
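// Illustrative sketch, not part of the patch: the new lastPivotKey and the
// accessors added in core/rawdb/accessors_chain.go above give a simple round
// trip. A minimal usage sketch from outside the package, assuming the
// accessors behave as written in the hunk; the in-memory database is only for
// illustration:
func pivotRoundTripExample() {
	db := rawdb.NewMemoryDatabase()
	if rawdb.ReadLastPivotNumber(db) != nil {
		panic("a fresh or fully synced node should have no pivot recorded")
	}
	rawdb.WriteLastPivotNumber(db, 4)
	if p := rawdb.ReadLastPivotNumber(db); p == nil || *p != 4 {
		panic("the stored pivot should survive the round trip")
	}
}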
@@ -469,6 +469,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I if pivot <= origin { origin = pivot - 1 } + // Write out the pivot into the database so a rollback beyond it will + // reenable fast sync + rawdb.WriteLastPivotNumber(d.stateDB, pivot) } } d.committed = 1 @@ -496,6 +499,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I d.ancientLimit = height - fullMaxForkAncestry - 1 } frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here. + // If a part of blockchain data has already been written into active store, // disable the ancient style insertion explicitly. if origin >= frozen && frozen != 0 { @@ -506,11 +510,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I } // Rewind the ancient store and blockchain if reorg happens. if origin+1 < frozen { - var hashes []common.Hash - for i := origin + 1; i < d.lightchain.CurrentHeader().Number.Uint64(); i++ { - hashes = append(hashes, rawdb.ReadCanonicalHash(d.stateDB, i)) + if err := d.lightchain.SetHead(origin + 1); err != nil { + return err } - d.lightchain.Rollback(hashes) } } // Initiate the sync using a concurrent header and content retrieval algorithm @@ -1382,35 +1384,32 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error { // Keep a count of uncertain headers to roll back var ( - rollback []*types.Header + rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis) rollbackErr error mode = d.getMode() ) defer func() { - if len(rollback) > 0 { - // Flatten the headers and roll them back - hashes := make([]common.Hash, len(rollback)) - for i, header := range rollback { - hashes[i] = header.Hash() - } + if rollback > 0 { lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0 if mode != LightSync { lastFastBlock = d.blockchain.CurrentFastBlock().Number() lastBlock = d.blockchain.CurrentBlock().Number() } - d.lightchain.Rollback(hashes) + if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block + // We're already unwinding the stack, only print the error to make it more visible + log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err) + } curFastBlock, curBlock := common.Big0, common.Big0 if mode != LightSync { curFastBlock = d.blockchain.CurrentFastBlock().Number() curBlock = d.blockchain.CurrentBlock().Number() } - log.Warn("Rolled back headers", "count", len(hashes), + log.Warn("Rolled back chain segment", "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), "fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), "block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr) } }() - // Wait for batches of headers to process gotHeaders := false @@ -1462,7 +1461,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er } } // Disable any rollback and return - rollback = nil + rollback = 0 return nil } // Otherwise split the chunk of headers into batches and process them @@ -1481,15 +1480,9 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er limit = len(headers) } chunk := headers[:limit] + // In case of header only syncing, validate the chunk immediately if mode == FastSync || mode == LightSync { - // Collect the yet unknown 
headers to mark them as uncertain - unknown := make([]*types.Header, 0, len(chunk)) - for _, header := range chunk { - if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) { - unknown = append(unknown, header) - } - } // If we're importing pure headers, verify based on their recentness frequency := fsHeaderCheckFrequency if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { @@ -1497,17 +1490,18 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er } if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil { rollbackErr = err - // If some headers were inserted, add them too to the rollback list - if n > 0 { - rollback = append(rollback, chunk[:n]...) + + // If some headers were inserted, track them as uncertain + if n > 0 && rollback == 0 { + rollback = chunk[0].Number.Uint64() } log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err) return fmt.Errorf("%w: %v", errInvalidChain, err) } - // All verifications passed, store newly found uncertain headers - rollback = append(rollback, unknown...) - if len(rollback) > fsHeaderSafetyNet { - rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...) + // All verifications passed, track all headers within the alloted limits + head := chunk[len(chunk)-1].Number.Uint64() + if head-rollback > uint64(fsHeaderSafetyNet) { + rollback = head - uint64(fsHeaderSafetyNet) } } // Unless we're doing light chains, schedule the headers for associated content retrieval @@ -1613,6 +1607,7 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error { } } go closeOnErr(sync) + // Figure out the ideal pivot block. Note, that this goalpost may move if the // sync takes long enough for the chain head to move significantly. pivot := uint64(0) @@ -1654,6 +1649,10 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error { if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) { log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks)) pivot = height - uint64(fsMinFullBlocks) + + // Write out the pivot into the database so a rollback beyond it will + // reenable fast sync + rawdb.WriteLastPivotNumber(d.stateDB, pivot) } } P, beforeP, afterP := splitAroundPivot(pivot, results) diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index e774b2b89d..7c165c63c3 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -341,25 +341,52 @@ func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []typ return len(blocks), nil } -// Rollback removes some recently added elements from the chain. -func (dl *downloadTester) Rollback(hashes []common.Hash) { +// SetHead rewinds the local chain to a new head. 
+func (dl *downloadTester) SetHead(head uint64) error { dl.lock.Lock() defer dl.lock.Unlock() - for i := len(hashes) - 1; i >= 0; i-- { - if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] { - dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1] + // Find the hash of the head to reset to + var hash common.Hash + for h, header := range dl.ownHeaders { + if header.Number.Uint64() == head { + hash = h + } + } + for h, header := range dl.ancientHeaders { + if header.Number.Uint64() == head { + hash = h + } + } + if hash == (common.Hash{}) { + return fmt.Errorf("unknown head to set: %d", head) + } + // Find the offset in the header chain + var offset int + for o, h := range dl.ownHashes { + if h == hash { + offset = o + break } - delete(dl.ownChainTd, hashes[i]) - delete(dl.ownHeaders, hashes[i]) - delete(dl.ownReceipts, hashes[i]) - delete(dl.ownBlocks, hashes[i]) + } + // Remove all the hashes and associated data afterwards + for i := offset + 1; i < len(dl.ownHashes); i++ { + delete(dl.ownChainTd, dl.ownHashes[i]) + delete(dl.ownHeaders, dl.ownHashes[i]) + delete(dl.ownReceipts, dl.ownHashes[i]) + delete(dl.ownBlocks, dl.ownHashes[i]) - delete(dl.ancientChainTd, hashes[i]) - delete(dl.ancientHeaders, hashes[i]) - delete(dl.ancientReceipts, hashes[i]) - delete(dl.ancientBlocks, hashes[i]) + delete(dl.ancientChainTd, dl.ownHashes[i]) + delete(dl.ancientHeaders, dl.ownHashes[i]) + delete(dl.ancientReceipts, dl.ownHashes[i]) + delete(dl.ancientBlocks, dl.ownHashes[i]) } + dl.ownHashes = dl.ownHashes[:offset+1] + return nil +} + +// Rollback removes some recently added elements from the chain. +func (dl *downloadTester) Rollback(hashes []common.Hash) { } // newPeer registers a new block download source into the downloader. diff --git a/eth/sync.go b/eth/sync.go index 0982a9702d..26badd1e21 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -271,15 +271,25 @@ func peerToSyncOp(mode downloader.SyncMode, p *peer) *chainSyncOp { } func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) { + // If we're in fast sync mode, return that directly if atomic.LoadUint32(&cs.pm.fastSync) == 1 { block := cs.pm.blockchain.CurrentFastBlock() td := cs.pm.blockchain.GetTdByHash(block.Hash()) return downloader.FastSync, td - } else { - head := cs.pm.blockchain.CurrentHeader() - td := cs.pm.blockchain.GetTd(head.Hash(), head.Number.Uint64()) - return downloader.FullSync, td } + // We are probably in full sync, but we might have rewound to before the + // fast sync pivot, check if we should reenable + if pivot := rawdb.ReadLastPivotNumber(cs.pm.chaindb); pivot != nil { + if head := cs.pm.blockchain.CurrentBlock(); head.NumberU64() < *pivot { + block := cs.pm.blockchain.CurrentFastBlock() + td := cs.pm.blockchain.GetTdByHash(block.Hash()) + return downloader.FastSync, td + } + } + // Nope, we're really full syncing + head := cs.pm.blockchain.CurrentHeader() + td := cs.pm.blockchain.GetTd(head.Hash(), head.Number.Uint64()) + return downloader.FullSync, td } // startSync launches doSync in a new goroutine. 
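The modeAndLocalHead change above boils down to one rule: a node whose full block head has been rewound to before the recorded fast sync pivot must resume fast sync rather than full sync across state it never verified. A minimal standalone sketch of that decision follows; the function and parameter names are illustrative, not part of the geth API.

func shouldFastSync(fastSyncActive bool, headBlock uint64, lastPivot *uint64) bool {
	// Explicit fast sync still in progress, keep going.
	if fastSyncActive {
		return true
	}
	// Rewound behind the pivot: the blocks above it were fast synced and the
	// intermediate state is unavailable, so full sync cannot proceed safely.
	if lastPivot != nil && headBlock < *lastPivot {
		return true
	}
	return false
}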
diff --git a/trie/sync.go b/trie/sync.go index 978e76799a..620e97fa30 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -99,7 +99,7 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb if _, ok := s.membatch.batch[root]; ok { return } - if s.bloom.Contains(root[:]) { + if s.bloom == nil || s.bloom.Contains(root[:]) { // Bloom filter says this might be a duplicate, double check blob, _ := s.database.Get(root[:]) if local, err := decodeNode(root[:], blob); local != nil && err == nil { @@ -138,7 +138,7 @@ func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) { if _, ok := s.membatch.batch[hash]; ok { return } - if s.bloom.Contains(hash[:]) { + if s.bloom == nil || s.bloom.Contains(hash[:]) { // Bloom filter says this might be a duplicate, double check if ok, _ := s.database.Has(hash[:]); ok { return @@ -300,7 +300,7 @@ func (s *Sync) children(req *request, object node) ([]*request, error) { if _, ok := s.membatch.batch[hash]; ok { continue } - if s.bloom.Contains(node) { + if s.bloom == nil || s.bloom.Contains(node) { // Bloom filter says this might be a duplicate, double check if ok, _ := s.database.Has(node); ok { continue From 15fdaf20055323874a05bcae780014fb99e7cffd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 20 Aug 2020 16:41:37 +0300 Subject: [PATCH 008/105] travis, dockerfile, appveyor, build: bump to Go 1.15 --- .travis.yml | 32 +++++++++++++++++++++----------- Dockerfile | 2 +- Dockerfile.alltools | 2 +- appveyor.yml | 4 ++-- build/checksums.txt | 2 +- 5 files changed, 26 insertions(+), 16 deletions(-) diff --git a/.travis.yml b/.travis.yml index 1b61667c88..245f52b362 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,7 +16,7 @@ jobs: - stage: lint os: linux dist: xenial - go: 1.14.x + go: 1.15.x env: - lint git: @@ -34,12 +34,22 @@ jobs: - go run build/ci.go install - go run build/ci.go test -coverage $TEST_PACKAGES + - stage: build + os: linux + dist: xenial + go: 1.14.x + env: + - GO111MODULE=on + script: + - go run build/ci.go install + - go run build/ci.go test -coverage $TEST_PACKAGES + # These are the latest Go versions. 
- stage: build os: linux arch: amd64 dist: xenial - go: 1.14.x + go: 1.15.x env: - GO111MODULE=on script: @@ -51,7 +61,7 @@ jobs: os: linux arch: arm64 dist: xenial - go: 1.14.x + go: 1.15.x env: - GO111MODULE=on script: @@ -61,7 +71,7 @@ jobs: - stage: build os: osx osx_image: xcode11.3 - go: 1.14.x + go: 1.15.x env: - GO111MODULE=on script: @@ -82,7 +92,7 @@ jobs: if: type = push os: linux dist: xenial - go: 1.14.x + go: 1.15.x env: - ubuntu-ppa - GO111MODULE=on @@ -99,7 +109,7 @@ jobs: - python-paramiko script: - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts - - go run build/ci.go debsrc -goversion 1.14.2 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder " + - go run build/ci.go debsrc -goversion 1.15 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder " # This builder does the Linux Azure uploads - stage: build @@ -107,7 +117,7 @@ jobs: os: linux dist: xenial sudo: required - go: 1.14.x + go: 1.15.x env: - azure-linux - GO111MODULE=on @@ -144,7 +154,7 @@ jobs: dist: xenial services: - docker - go: 1.14.x + go: 1.15.x env: - azure-linux-mips - GO111MODULE=on @@ -192,7 +202,7 @@ jobs: git: submodules: false # avoid cloning ethereum/tests before_install: - - curl https://dl.google.com/go/go1.14.2.linux-amd64.tar.gz | tar -xz + - curl https://dl.google.com/go/go1.15.linux-amd64.tar.gz | tar -xz - export PATH=`pwd`/go/bin:$PATH - export GOROOT=`pwd`/go - export GOPATH=$HOME/go @@ -210,7 +220,7 @@ jobs: - stage: build if: type = push os: osx - go: 1.14.x + go: 1.15.x env: - azure-osx - azure-ios @@ -242,7 +252,7 @@ jobs: if: type = cron os: linux dist: xenial - go: 1.14.x + go: 1.15.x env: - azure-purge - GO111MODULE=on diff --git a/Dockerfile b/Dockerfile index 54453c4df5..0705361f5b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build Geth in a stock Go builder container -FROM golang:1.14-alpine as builder +FROM golang:1.15-alpine as builder RUN apk add --no-cache make gcc musl-dev linux-headers git diff --git a/Dockerfile.alltools b/Dockerfile.alltools index 9c28979a1e..e2604232cf 100644 --- a/Dockerfile.alltools +++ b/Dockerfile.alltools @@ -1,5 +1,5 @@ # Build Geth in a stock Go builder container -FROM golang:1.14-alpine as builder +FROM golang:1.15-alpine as builder RUN apk add --no-cache make gcc musl-dev linux-headers git diff --git a/appveyor.yml b/appveyor.yml index fe15cc7f0e..7d6bf87639 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -24,8 +24,8 @@ environment: install: - git submodule update --init - rmdir C:\go /s /q - - appveyor DownloadFile https://dl.google.com/go/go1.14.2.windows-%GETH_ARCH%.zip - - 7z x go1.14.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL + - appveyor DownloadFile https://dl.google.com/go/go1.15.windows-%GETH_ARCH%.zip + - 7z x go1.15.windows-%GETH_ARCH%.zip -y -oC:\ > NUL - go version - gcc --version diff --git a/build/checksums.txt b/build/checksums.txt index c4b276e349..39f855cd0c 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -1,6 +1,6 @@ # This file contains sha256 checksums of optional build dependencies. 
-98de84e69726a66da7b4e58eac41b99cbe274d7e8906eeb8a5b7eb0aadee7f7c go1.14.2.src.tar.gz +69438f7ed4f532154ffaf878f3dfd83747e7a00b70b3556eddabf7aaee28ac3a go1.15.src.tar.gz d998a84eea42f2271aca792a7b027ca5c1edfcba229e8e5a844c9ac3f336df35 golangci-lint-1.27.0-linux-armv7.tar.gz bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4 golangci-lint.exe-1.27.0-windows-amd64.zip From 4e54b1a45ead09c1f4ab85ba7f62accd8f672b12 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Fri, 21 Aug 2020 10:04:36 +0200 Subject: [PATCH 009/105] metrics: zero temp variable in updateMeter (#21470) * metrics: zero temp variable in updateMeter Previously the temp variable was not updated properly after summing it to count. This meant we had astronomically high metrics, now we zero out the temp whenever we sum it onto the snapshot count * metrics: move temp variable to be aligned, unit tests Moves the temp variable in MeterSnapshot to be 64-bit aligned because of the atomic bug. Adds a unit test, that catches the previous bug. --- metrics/meter.go | 8 ++++++-- metrics/meter_test.go | 16 ++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/metrics/meter.go b/metrics/meter.go index 7d2a2f5307..60ae919d04 100644 --- a/metrics/meter.go +++ b/metrics/meter.go @@ -101,8 +101,12 @@ func NewRegisteredMeterForced(name string, r Registry) Meter { // MeterSnapshot is a read-only copy of another Meter. type MeterSnapshot struct { - count int64 + // WARNING: The `temp` field is accessed atomically. + // On 32 bit platforms, only 64-bit aligned fields can be atomic. The struct is + // guaranteed to be so aligned, so take advantage of that. For more information, + // see https://golang.org/pkg/sync/atomic/#pkg-note-BUG. temp int64 + count int64 rate1, rate5, rate15, rateMean float64 } @@ -253,7 +257,7 @@ func (m *StandardMeter) updateSnapshot() { func (m *StandardMeter) updateMeter() { // should only run with write lock held on m.lock - n := atomic.LoadInt64(&m.snapshot.temp) + n := atomic.SwapInt64(&m.snapshot.temp, 0) m.snapshot.count += n m.a1.Update(n) m.a5.Update(n) diff --git a/metrics/meter_test.go b/metrics/meter_test.go index 9c43b61561..b3f6cb8c0c 100644 --- a/metrics/meter_test.go +++ b/metrics/meter_test.go @@ -73,3 +73,19 @@ func TestMeterZero(t *testing.T) { t.Errorf("m.Count(): 0 != %v\n", count) } } + +func TestMeterRepeat(t *testing.T) { + m := NewMeter() + for i := 0; i < 101; i++ { + m.Mark(int64(i)) + } + if count := m.Count(); count != 5050 { + t.Errorf("m.Count(): 5050 != %v\n", count) + } + for i := 0; i < 101; i++ { + m.Mark(int64(i)) + } + if count := m.Count(); count != 10100 { + t.Errorf("m.Count(): 10100 != %v\n", count) + } +} From 9f7b79af00a1ed57ab8640636041a81b58ecff59 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Fri, 21 Aug 2020 13:27:10 +0200 Subject: [PATCH 010/105] eth/downloader: fix rollback issue on short chains --- eth/downloader/downloader.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 83b757fa17..f3b0926d77 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -1502,6 +1502,8 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er head := chunk[len(chunk)-1].Number.Uint64() if head-rollback > uint64(fsHeaderSafetyNet) { rollback = head - uint64(fsHeaderSafetyNet) + } else { + rollback = 1 } } // Unless we're doing light chains, schedule the headers for associated content retrieval From 
87c0ba92136a75db0ab2aba1046d4a9860375d6a Mon Sep 17 00:00:00 2001 From: gary rong Date: Fri, 21 Aug 2020 20:10:40 +0800 Subject: [PATCH 011/105] core, eth, les, trie: add a prefix to contract code (#21080) --- cmd/evm/internal/t8ntool/execution.go | 5 +- cmd/geth/retesteth.go | 2 +- consensus/clique/clique.go | 3 +- consensus/ethash/consensus.go | 3 +- core/block_validator.go | 5 +- core/blockchain.go | 25 +++- core/blockchain_test.go | 5 +- core/genesis.go | 3 +- core/rawdb/accessors_chain_test.go | 4 +- core/rawdb/accessors_indexes_test.go | 28 +++- core/rawdb/accessors_metadata.go | 17 --- core/rawdb/accessors_state.go | 96 +++++++++++++ core/rawdb/chain_iterator_test.go | 4 +- core/rawdb/database.go | 4 + core/rawdb/schema.go | 16 +++ core/state/database.go | 35 ++++- core/state/iterator_test.go | 6 +- core/state/statedb.go | 21 +-- core/state/sync.go | 2 +- core/state/sync_test.go | 95 +++++++++---- core/tx_pool_test.go | 3 +- core/types/block.go | 8 +- core/types/block_test.go | 28 +++- core/types/derive_sha.go | 17 ++- eth/downloader/downloader.go | 2 +- eth/downloader/queue.go | 5 +- eth/downloader/statesync.go | 8 +- eth/fetcher/block_fetcher.go | 5 +- eth/fetcher/block_fetcher_test.go | 3 +- eth/handler.go | 11 +- les/odr_requests.go | 4 +- les/server_handler.go | 2 +- light/odr.go | 2 +- light/odr_test.go | 2 +- light/trie.go | 4 +- miner/worker.go | 2 + trie/database.go | 129 ++++++------------ trie/secure_trie.go | 3 +- trie/sync.go | 187 ++++++++++++++++---------- trie/sync_bloom.go | 19 ++- trie/sync_test.go | 36 +++-- trie/trie.go | 6 + 42 files changed, 579 insertions(+), 286 deletions(-) create mode 100644 core/rawdb/accessors_state.go diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 0fd6b869fc..75586d588b 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" "golang.org/x/crypto/sha3" ) @@ -220,8 +221,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } execRs := &ExecutionResult{ StateRoot: root, - TxRoot: types.DeriveSha(includedTxs), - ReceiptRoot: types.DeriveSha(receipts), + TxRoot: types.DeriveSha(includedTxs, new(trie.Trie)), + ReceiptRoot: types.DeriveSha(receipts, new(trie.Trie)), Bloom: types.CreateBloom(receipts), LogsHash: rlpHash(statedb.Logs()), Receipts: receipts, diff --git a/cmd/geth/retesteth.go b/cmd/geth/retesteth.go index f4ec832789..1d4c15d1e6 100644 --- a/cmd/geth/retesteth.go +++ b/cmd/geth/retesteth.go @@ -248,7 +248,7 @@ func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header.Root = statedb.IntermediateRoot(chain.Config().IsEIP158(header.Number)) // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, receipts), nil + return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), nil } } diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index a2e61bbc05..02f2451133 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -39,6 +39,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" lru "github.com/hashicorp/golang-lru" "golang.org/x/crypto/sha3" ) @@ -565,7 +566,7 @@ func (c *Clique) 
FinalizeAndAssemble(chain consensus.ChainHeaderReader, header * header.UncleHash = types.CalcUncleHash(nil) // Assemble and return the final block for sealing - return types.NewBlock(header, txs, nil, receipts), nil + return types.NewBlock(header, txs, nil, receipts, new(trie.Trie)), nil } // Authorize injects a private key into the consensus engine to mint new blocks diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index bbc554951d..bdc02098af 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -33,6 +33,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" "golang.org/x/crypto/sha3" ) @@ -583,7 +584,7 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, receipts), nil + return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), nil } // SealHash returns the hash of a block prior to it being sealed. diff --git a/core/block_validator.go b/core/block_validator.go index b36ca56d7f..b7af12ff9e 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) // BlockValidator is responsible for validating block headers, uncles and @@ -61,7 +62,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash { return fmt.Errorf("uncle root hash mismatch: have %x, want %x", hash, header.UncleHash) } - if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash { + if hash := types.DeriveSha(block.Transactions(), new(trie.Trie)); hash != header.TxHash { return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash) } if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { @@ -89,7 +90,7 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom) } // Tre receipt Trie's root (R = (Tr [[H1, R1], ... [Hn, R1]])) - receiptSha := types.DeriveSha(receipts) + receiptSha := types.DeriveSha(receipts, new(trie.Trie)) if receiptSha != header.ReceiptHash { return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha) } diff --git a/core/blockchain.go b/core/blockchain.go index 9dc1fa9c65..8434d2193f 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -112,7 +112,10 @@ const ( // - Version 7 // The following incompatible database changes were added: // * Use freezer as the ancient database to maintain all ancient data - BlockChainVersion uint64 = 7 + // - Version 8 + // The following incompatible database changes were added: + // * New scheme for contract code in order to separate the codes and trie nodes + BlockChainVersion uint64 = 8 ) // CacheConfig contains the configuration values for the trie caching/pruning @@ -895,12 +898,30 @@ func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types. 
return uncles } -// TrieNode retrieves a blob of data associated with a trie node (or code hash) +// TrieNode retrieves a blob of data associated with a trie node // either from ephemeral in-memory cache, or from persistent storage. func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { return bc.stateCache.TrieDB().Node(hash) } +// ContractCode retrieves a blob of data associated with a contract hash +// either from ephemeral in-memory cache, or from persistent storage. +func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) { + return bc.stateCache.ContractCode(common.Hash{}, hash) +} + +// ContractCodeWithPrefix retrieves a blob of data associated with a contract +// hash either from ephemeral in-memory cache, or from persistent storage. +// +// If the code doesn't exist in the in-memory cache, check the storage with +// new code scheme. +func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) { + type codeReader interface { + ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error) + } + return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash) +} + // Stop stops the blockchain service. If any imports are currently in progress // it will abort them using the procInterrupt. func (bc *BlockChain) Stop() { diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 41fc4920ca..7ec62b11dd 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -36,6 +36,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) // So we can deterministically seed different blockchains @@ -681,12 +682,12 @@ func TestFastVsFullChains(t *testing.T) { } if fblock, arblock, anblock := fast.GetBlockByHash(hash), archive.GetBlockByHash(hash), ancient.GetBlockByHash(hash); fblock.Hash() != arblock.Hash() || anblock.Hash() != arblock.Hash() { t.Errorf("block #%d [%x]: block mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock, anblock, arblock) - } else if types.DeriveSha(fblock.Transactions()) != types.DeriveSha(arblock.Transactions()) || types.DeriveSha(anblock.Transactions()) != types.DeriveSha(arblock.Transactions()) { + } else if types.DeriveSha(fblock.Transactions(), new(trie.Trie)) != types.DeriveSha(arblock.Transactions(), new(trie.Trie)) || types.DeriveSha(anblock.Transactions(), new(trie.Trie)) != types.DeriveSha(arblock.Transactions(), new(trie.Trie)) { t.Errorf("block #%d [%x]: transactions mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Transactions(), anblock.Transactions(), arblock.Transactions()) } else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) { t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock, arblock.Uncles()) } - if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) { + if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, 
*rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts, new(trie.Trie)) != types.DeriveSha(areceipts, new(trie.Trie)) { t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts) } } diff --git a/core/genesis.go b/core/genesis.go index a4790854b6..4525b9c174 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -36,6 +36,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" ) //go:generate gencodec -type Genesis -field-override genesisSpecMarshaling -out gen_genesis.go @@ -287,7 +288,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { statedb.Commit(false) statedb.Database().TrieDB().Commit(root, true, nil) - return types.NewBlock(head, nil, nil, nil) + return types.NewBlock(head, nil, nil, nil, new(trie.Trie)) } // Commit writes the block and state of a genesis specification to the database. diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 3eba2a3b4e..074c24d8fe 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -84,7 +84,7 @@ func TestBodyStorage(t *testing.T) { WriteBody(db, hash, 0, body) if entry := ReadBody(db, hash, 0); entry == nil { t.Fatalf("Stored body not found") - } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { + } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body) } if entry := ReadBodyRLP(db, hash, 0); entry == nil { @@ -138,7 +138,7 @@ func TestBlockStorage(t *testing.T) { } if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil { t.Fatalf("Stored body not found") - } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { + } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(block.Transactions(), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body()) } // Delete the block and verify the execution diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go index 49d00f9900..87338c62bf 100644 --- a/core/rawdb/accessors_indexes_test.go +++ b/core/rawdb/accessors_indexes_test.go @@ -18,6 +18,7 @@ package rawdb import ( "bytes" + "hash" "math/big" "testing" @@ -26,8 +27,33 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/crypto/sha3" ) +// testHasher is the helper tool for transaction/receipt list hashing. +// The original hasher is trie, in order to get rid of import cycle, +// use the testing hasher instead. 
+type testHasher struct { + hasher hash.Hash +} + +func newHasher() *testHasher { + return &testHasher{hasher: sha3.NewLegacyKeccak256()} +} + +func (h *testHasher) Reset() { + h.hasher.Reset() +} + +func (h *testHasher) Update(key, val []byte) { + h.hasher.Write(key) + h.hasher.Write(val) +} + +func (h *testHasher) Hash() common.Hash { + return common.BytesToHash(h.hasher.Sum(nil)) +} + // Tests that positional lookup metadata can be stored and retrieved. func TestLookupStorage(t *testing.T) { tests := []struct { @@ -73,7 +99,7 @@ func TestLookupStorage(t *testing.T) { tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33}) txs := []*types.Transaction{tx1, tx2, tx3} - block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil) + block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newHasher()) // Check that no transactions entries are in a pristine database for i, tx := range txs { diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index f8d09fbddf..14a302a127 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -79,20 +79,3 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.Cha log.Crit("Failed to store chain config", "err", err) } } - -// ReadPreimage retrieves a single preimage of the provided hash. -func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte { - data, _ := db.Get(preimageKey(hash)) - return data -} - -// WritePreimages writes the provided set of preimages to the database. -func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) { - for hash, preimage := range preimages { - if err := db.Put(preimageKey(hash), preimage); err != nil { - log.Crit("Failed to store trie preimage", "err", err) - } - } - preimageCounter.Inc(int64(len(preimages))) - preimageHitCounter.Inc(int64(len(preimages))) -} diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go new file mode 100644 index 0000000000..6112de03ad --- /dev/null +++ b/core/rawdb/accessors_state.go @@ -0,0 +1,96 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" +) + +// ReadPreimage retrieves a single preimage of the provided hash. +func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte { + data, _ := db.Get(preimageKey(hash)) + return data +} + +// WritePreimages writes the provided set of preimages to the database. 
+func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) { + for hash, preimage := range preimages { + if err := db.Put(preimageKey(hash), preimage); err != nil { + log.Crit("Failed to store trie preimage", "err", err) + } + } + preimageCounter.Inc(int64(len(preimages))) + preimageHitCounter.Inc(int64(len(preimages))) +} + +// ReadCode retrieves the contract code of the provided code hash. +func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte { + // Try with the legacy code scheme first, if not then try with current + // scheme. Since most of the code will be found with legacy scheme. + // + // todo(rjl493456442) change the order when we forcibly upgrade the code + // scheme with snapshot. + data, _ := db.Get(hash[:]) + if len(data) != 0 { + return data + } + return ReadCodeWithPrefix(db, hash) +} + +// ReadCodeWithPrefix retrieves the contract code of the provided code hash. +// The main difference between this function and ReadCode is this function +// will only check the existence with latest scheme(with prefix). +func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte { + data, _ := db.Get(codeKey(hash)) + return data +} + +// WriteCode writes the provided contract code database. +func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) { + if err := db.Put(codeKey(hash), code); err != nil { + log.Crit("Failed to store contract code", "err", err) + } +} + +// DeleteCode deletes the specified contract code from the database. +func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) { + if err := db.Delete(codeKey(hash)); err != nil { + log.Crit("Failed to delete contract code", "err", err) + } +} + +// ReadTrieNode retrieves the trie node of the provided hash. +func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte { + data, _ := db.Get(hash.Bytes()) + return data +} + +// WriteTrieNode writes the provided trie node database. +func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) { + if err := db.Put(hash.Bytes(), node); err != nil { + log.Crit("Failed to store trie node", "err", err) + } +} + +// DeleteTrieNode deletes the specified trie node from the database. 
+func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) { + if err := db.Delete(hash.Bytes()); err != nil { + log.Crit("Failed to delete trie node", "err", err) + } +} diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go index c99a97c5f8..c635cd2f12 100644 --- a/core/rawdb/chain_iterator_test.go +++ b/core/rawdb/chain_iterator_test.go @@ -34,11 +34,11 @@ func TestChainIterator(t *testing.T) { var txs []*types.Transaction for i := uint64(0); i <= 10; i++ { if i == 0 { - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, nil, nil, nil) // Empty genesis block + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, nil, nil, nil, newHasher()) // Empty genesis block } else { tx := types.NewTransaction(i, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11}) txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) } WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) diff --git a/core/rawdb/database.go b/core/rawdb/database.go index d22ca1c529..316b5addf3 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -258,6 +258,7 @@ func InspectDatabase(db ethdb.Database) error { numHashPairing common.StorageSize hashNumPairing common.StorageSize trieSize common.StorageSize + codeSize common.StorageSize txlookupSize common.StorageSize accountSnapSize common.StorageSize storageSnapSize common.StorageSize @@ -316,6 +317,8 @@ func InspectDatabase(db ethdb.Database) error { chtTrieNodes += size case bytes.HasPrefix(key, []byte("blt-")) && len(key) == 4+common.HashLength: bloomTrieNodes += size + case bytes.HasPrefix(key, codePrefix) && len(key) == len(codePrefix)+common.HashLength: + codeSize += size case len(key) == common.HashLength: trieSize += size default: @@ -355,6 +358,7 @@ func InspectDatabase(db ethdb.Database) error { {"Key-Value store", "Block hash->number", hashNumPairing.String()}, {"Key-Value store", "Transaction index", txlookupSize.String()}, {"Key-Value store", "Bloombit index", bloomBitsSize.String()}, + {"Key-Value store", "Contract codes", codeSize.String()}, {"Key-Value store", "Trie nodes", trieSize.String()}, {"Key-Value store", "Trie preimages", preimageSize.String()}, {"Key-Value store", "Account snapshot", accountSnapSize.String()}, diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index b87e7888cc..e2b093a34a 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -18,6 +18,7 @@ package rawdb import ( + "bytes" "encoding/binary" "github.com/ethereum/go-ethereum/common" @@ -69,6 +70,7 @@ var ( bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value + codePrefix = []byte("c") // codePrefix + code hash -> account code preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage configPrefix = []byte("ethereum-config-") // config prefix for the db @@ -192,6 +194,20 @@ func preimageKey(hash common.Hash) []byte { return append(preimagePrefix, hash.Bytes()...) 
} +// codeKey = codePrefix + hash +func codeKey(hash common.Hash) []byte { + return append(codePrefix, hash.Bytes()...) +} + +// IsCodeKey reports whether the given byte slice is the key of contract code, +// if so return the raw code hash as well. +func IsCodeKey(key []byte) (bool, []byte) { + if bytes.HasPrefix(key, codePrefix) && len(key) == common.HashLength+len(codePrefix) { + return true, key[len(codePrefix):] + } + return false, nil +} + // configKey = configPrefix + hash func configKey(hash common.Hash) []byte { return append(configPrefix, hash.Bytes()...) diff --git a/core/state/database.go b/core/state/database.go index 7bcec6d003..a9342f5179 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -17,9 +17,12 @@ package state import ( + "errors" "fmt" + "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie" lru "github.com/hashicorp/golang-lru" @@ -28,6 +31,9 @@ import ( const ( // Number of codehash->size associations to keep. codeSizeCacheSize = 100000 + + // Cache size granted for caching clean code. + codeCacheSize = 64 * 1024 * 1024 ) // Database wraps access to tries and contract code. @@ -111,12 +117,14 @@ func NewDatabaseWithCache(db ethdb.Database, cache int, journal string) Database return &cachingDB{ db: trie.NewDatabaseWithCache(db, cache, journal), codeSizeCache: csc, + codeCache: fastcache.New(codeCacheSize), } } type cachingDB struct { db *trie.Database codeSizeCache *lru.Cache + codeCache *fastcache.Cache } // OpenTrie opens the main account trie at a specific root hash. @@ -141,11 +149,32 @@ func (db *cachingDB) CopyTrie(t Trie) Trie { // ContractCode retrieves a particular contract's code. func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) { - code, err := db.db.Node(codeHash) - if err == nil { + if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 { + return code, nil + } + code := rawdb.ReadCode(db.db.DiskDB(), codeHash) + if len(code) > 0 { + db.codeCache.Set(codeHash.Bytes(), code) + db.codeSizeCache.Add(codeHash, len(code)) + return code, nil + } + return nil, errors.New("not found") +} + +// ContractCodeWithPrefix retrieves a particular contract's code. If the +// code can't be found in the cache, then check the existence with **new** +// db scheme. +func (db *cachingDB) ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error) { + if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 { + return code, nil + } + code := rawdb.ReadCodeWithPrefix(db.db.DiskDB(), codeHash) + if len(code) > 0 { + db.codeCache.Set(codeHash.Bytes(), code) db.codeSizeCache.Add(codeHash, len(code)) + return code, nil } - return code, err + return nil, errors.New("not found") } // ContractCodeSize retrieves a particular contracts code's size. 
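Because contract code now lives under its own one-byte prefix, database scans must distinguish prefixed code entries from the plain 32-byte keys still used for trie nodes; that is what the InspectDatabase change above does with codePrefix, and what IsCodeKey is meant for. A hedged sketch of that scanning pattern, with an invented helper name and assuming the two-argument NewIterator(prefix, start) form of ethdb.Database:

// countCodeEntries is an illustrative helper, not part of the patch, showing how
// IsCodeKey separates prefixed contract code from all other keys in a full scan.
func countCodeEntries(db ethdb.Database) (codes int, others int) {
	it := db.NewIterator(nil, nil)
	defer it.Release()
	for it.Next() {
		if ok, _ := rawdb.IsCodeKey(it.Key()); ok {
			codes++
		} else {
			others++
		}
	}
	return codes, others
}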
diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go index 5060f7a651..d1afe9ca3e 100644 --- a/core/state/iterator_test.go +++ b/core/state/iterator_test.go @@ -28,6 +28,7 @@ import ( func TestNodeIteratorCoverage(t *testing.T) { // Create some arbitrary test state to iterate db, root, _ := makeTestState() + db.TrieDB().Commit(root, false, nil) state, err := New(root, db, nil) if err != nil { @@ -42,7 +43,10 @@ func TestNodeIteratorCoverage(t *testing.T) { } // Cross check the iterated hashes and the database/nodepool content for hash := range hashes { - if _, err := db.TrieDB().Node(hash); err != nil { + if _, err = db.TrieDB().Node(hash); err != nil { + _, err = db.ContractCode(common.Hash{}, hash) + } + if err != nil { t.Errorf("failed to retrieve reported node %x", hash) } } diff --git a/core/state/statedb.go b/core/state/statedb.go index 0134a9d443..cd020e6543 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -25,6 +25,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -42,9 +43,6 @@ type revision struct { var ( // emptyRoot is the known root hash of an empty trie. emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - - // emptyCode is the known hash of the empty EVM bytecode. - emptyCode = crypto.Keccak256Hash(nil) ) type proofList [][]byte @@ -589,7 +587,10 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) } s.setStateObject(newobj) - return newobj, prev + if prev != nil && !prev.deleted { + return newobj, prev + } + return newobj, nil } // CreateAccount explicitly creates a state object. 
If a state object with the address @@ -816,11 +817,12 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { s.IntermediateRoot(deleteEmptyObjects) // Commit objects to the trie, measuring the elapsed time + codeWriter := s.db.TrieDB().DiskDB().NewBatch() for addr := range s.stateObjectsDirty { if obj := s.stateObjects[addr]; !obj.deleted { // Write any contract code associated with the state object if obj.code != nil && obj.dirtyCode { - s.db.TrieDB().InsertBlob(common.BytesToHash(obj.CodeHash()), obj.code) + rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) obj.dirtyCode = false } // Write any storage changes in the state object to its storage trie @@ -832,6 +834,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { if len(s.stateObjectsDirty) > 0 { s.stateObjectsDirty = make(map[common.Address]struct{}) } + if codeWriter.ValueSize() > 0 { + if err := codeWriter.Write(); err != nil { + log.Crit("Failed to commit dirty codes", "error", err) + } + } // Write the account trie changes, measuing the amount of wasted time var start time.Time if metrics.EnabledExpensive { @@ -847,10 +854,6 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { if account.Root != emptyRoot { s.db.TrieDB().Reference(account.Root, parent) } - code := common.BytesToHash(account.CodeHash) - if code != emptyCode { - s.db.TrieDB().Reference(code, parent) - } return nil }) if metrics.EnabledExpensive { diff --git a/core/state/sync.go b/core/state/sync.go index ef79305273..052cfad7bb 100644 --- a/core/state/sync.go +++ b/core/state/sync.go @@ -34,7 +34,7 @@ func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.S return err } syncer.AddSubTrie(obj.Root, 64, parent, nil) - syncer.AddRawEntry(common.BytesToHash(obj.CodeHash), 64, parent) + syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), 64, parent) return nil } syncer = trie.NewSync(root, database, callback, bloom) diff --git a/core/state/sync_test.go b/core/state/sync_test.go index 924c8c2f90..17670750ed 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -133,13 +133,17 @@ func TestEmptyStateSync(t *testing.T) { // Tests that given a root hash, a state can sync iteratively on a single thread, // requesting retrieval tasks and returning all of them in one go. 
-func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) } -func TestIterativeStateSyncBatched(t *testing.T) { testIterativeStateSync(t, 100) } +func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1, false) } +func TestIterativeStateSyncBatched(t *testing.T) { testIterativeStateSync(t, 100, false) } +func TestIterativeStateSyncIndividualFromDisk(t *testing.T) { testIterativeStateSync(t, 1, true) } +func TestIterativeStateSyncBatchedFromDisk(t *testing.T) { testIterativeStateSync(t, 100, true) } -func testIterativeStateSync(t *testing.T, count int) { +func testIterativeStateSync(t *testing.T, count int, commit bool) { // Create a random state to copy srcDb, srcRoot, srcAccounts := makeTestState() - + if commit { + srcDb.TrieDB().Commit(srcRoot, false, nil) + } // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb)) @@ -149,13 +153,18 @@ func testIterativeStateSync(t *testing.T, count int) { results := make([]trie.SyncResult, len(queue)) for i, hash := range queue { data, err := srcDb.TrieDB().Node(hash) + if err != nil { + data, err = srcDb.ContractCode(common.Hash{}, hash) + } if err != nil { t.Fatalf("failed to retrieve node data for %x", hash) } results[i] = trie.SyncResult{Hash: hash, Data: data} } - if _, index, err := sched.Process(results); err != nil { - t.Fatalf("failed to process result #%d: %v", index, err) + for _, result := range results { + if err := sched.Process(result); err != nil { + t.Fatalf("failed to process result %v", err) + } } batch := dstDb.NewBatch() if err := sched.Commit(batch); err != nil { @@ -184,13 +193,18 @@ func TestIterativeDelayedStateSync(t *testing.T) { results := make([]trie.SyncResult, len(queue)/2+1) for i, hash := range queue[:len(results)] { data, err := srcDb.TrieDB().Node(hash) + if err != nil { + data, err = srcDb.ContractCode(common.Hash{}, hash) + } if err != nil { t.Fatalf("failed to retrieve node data for %x", hash) } results[i] = trie.SyncResult{Hash: hash, Data: data} } - if _, index, err := sched.Process(results); err != nil { - t.Fatalf("failed to process result #%d: %v", index, err) + for _, result := range results { + if err := sched.Process(result); err != nil { + t.Fatalf("failed to process result %v", err) + } } batch := dstDb.NewBatch() if err := sched.Commit(batch); err != nil { @@ -226,14 +240,19 @@ func testIterativeRandomStateSync(t *testing.T, count int) { results := make([]trie.SyncResult, 0, len(queue)) for hash := range queue { data, err := srcDb.TrieDB().Node(hash) + if err != nil { + data, err = srcDb.ContractCode(common.Hash{}, hash) + } if err != nil { t.Fatalf("failed to retrieve node data for %x", hash) } results = append(results, trie.SyncResult{Hash: hash, Data: data}) } // Feed the retrieved results back and queue new tasks - if _, index, err := sched.Process(results); err != nil { - t.Fatalf("failed to process result #%d: %v", index, err) + for _, result := range results { + if err := sched.Process(result); err != nil { + t.Fatalf("failed to process result %v", err) + } } batch := dstDb.NewBatch() if err := sched.Commit(batch); err != nil { @@ -270,6 +289,9 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { delete(queue, hash) data, err := srcDb.TrieDB().Node(hash) + if err != nil { + data, err = srcDb.ContractCode(common.Hash{}, hash) + } if err != nil { t.Fatalf("failed to retrieve node data for %x", hash) } @@ -280,8 +302,10 @@ func 
TestIterativeRandomDelayedStateSync(t *testing.T) { } } // Feed the retrieved results back and queue new tasks - if _, index, err := sched.Process(results); err != nil { - t.Fatalf("failed to process result #%d: %v", index, err) + for _, result := range results { + if err := sched.Process(result); err != nil { + t.Fatalf("failed to process result %v", err) + } } batch := dstDb.NewBatch() if err := sched.Commit(batch); err != nil { @@ -302,6 +326,15 @@ func TestIncompleteStateSync(t *testing.T) { // Create a random state to copy srcDb, srcRoot, srcAccounts := makeTestState() + // isCode reports whether the hash is contract code hash. + isCode := func(hash common.Hash) bool { + for _, acc := range srcAccounts { + if hash == crypto.Keccak256Hash(acc.code) { + return true + } + } + return false + } checkTrieConsistency(srcDb.TrieDB().DiskDB().(ethdb.Database), srcRoot) // Create a destination state and sync with the scheduler @@ -315,14 +348,19 @@ func TestIncompleteStateSync(t *testing.T) { results := make([]trie.SyncResult, len(queue)) for i, hash := range queue { data, err := srcDb.TrieDB().Node(hash) + if err != nil { + data, err = srcDb.ContractCode(common.Hash{}, hash) + } if err != nil { t.Fatalf("failed to retrieve node data for %x", hash) } results[i] = trie.SyncResult{Hash: hash, Data: data} } // Process each of the state nodes - if _, index, err := sched.Process(results); err != nil { - t.Fatalf("failed to process result #%d: %v", index, err) + for _, result := range results { + if err := sched.Process(result); err != nil { + t.Fatalf("failed to process result %v", err) + } } batch := dstDb.NewBatch() if err := sched.Commit(batch); err != nil { @@ -333,12 +371,9 @@ func TestIncompleteStateSync(t *testing.T) { added = append(added, result.Hash) } // Check that all known sub-tries added so far are complete or missing entirely. - checkSubtries: for _, hash := range added { - for _, acc := range srcAccounts { - if hash == crypto.Keccak256Hash(acc.code) { - continue checkSubtries // skip trie check of code nodes. - } + if isCode(hash) { + continue } // Can't use checkStateConsistency here because subtrie keys may have odd // length and crash in LeafKey. 
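The tests above keep repeating the same lookup: after the code/trie split, a hash handed out by the sync scheduler may name either a trie node or a piece of contract code, so the source database has to be consulted for both. A small sketch of that pattern, using an assumed helper name:

// readSyncItem mirrors the lookup used throughout these tests: try the trie
// node store first, then fall back to the contract code store.
func readSyncItem(srcDb state.Database, hash common.Hash) ([]byte, error) {
	if data, err := srcDb.TrieDB().Node(hash); err == nil {
		return data, nil
	}
	return srcDb.ContractCode(common.Hash{}, hash)
}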
@@ -351,13 +386,25 @@ func TestIncompleteStateSync(t *testing.T) { } // Sanity check that removing any node from the database is detected for _, node := range added[1:] { - key := node.Bytes() - value, _ := dstDb.Get(key) - - dstDb.Delete(key) + var ( + key = node.Bytes() + code = isCode(node) + val []byte + ) + if code { + val = rawdb.ReadCode(dstDb, node) + rawdb.DeleteCode(dstDb, node) + } else { + val = rawdb.ReadTrieNode(dstDb, node) + rawdb.DeleteTrieNode(dstDb, node) + } if err := checkStateConsistency(dstDb, added[0]); err == nil { t.Fatalf("trie inconsistency not caught, missing: %x", key) } - dstDb.Put(key, value) + if code { + rawdb.WriteCode(dstDb, node, val) + } else { + rawdb.WriteTrieNode(dstDb, node, val) + } } } diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index f87d6fbea9..4fca734e65 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) // testTxPoolConfig is a transaction pool configuration without stateful disk @@ -54,7 +55,7 @@ type testBlockChain struct { func (bc *testBlockChain) CurrentBlock() *types.Block { return types.NewBlock(&types.Header{ GasLimit: bc.gasLimit, - }, nil, nil, nil) + }, nil, nil, nil, new(trie.Trie)) } func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { diff --git a/core/types/block.go b/core/types/block.go index 8316cd7f3a..8096ebb755 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -35,7 +35,7 @@ import ( ) var ( - EmptyRootHash = DeriveSha(Transactions{}) + EmptyRootHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") EmptyUncleHash = rlpHash([]*Header(nil)) ) @@ -221,14 +221,14 @@ type storageblock struct { // The values of TxHash, UncleHash, ReceiptHash and Bloom in header // are ignored and set to values derived from the given txs, uncles // and receipts. -func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block { +func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher Hasher) *Block { b := &Block{header: CopyHeader(header), td: new(big.Int)} // TODO: panic if len(txs) != len(receipts) if len(txs) == 0 { b.header.TxHash = EmptyRootHash } else { - b.header.TxHash = DeriveSha(Transactions(txs)) + b.header.TxHash = DeriveSha(Transactions(txs), hasher) b.transactions = make(Transactions, len(txs)) copy(b.transactions, txs) } @@ -236,7 +236,7 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []* if len(receipts) == 0 { b.header.ReceiptHash = EmptyRootHash } else { - b.header.ReceiptHash = DeriveSha(Receipts(receipts)) + b.header.ReceiptHash = DeriveSha(Receipts(receipts), hasher) b.header.Bloom = CreateBloom(receipts) } diff --git a/core/types/block_test.go b/core/types/block_test.go index 46ad00c6eb..4dfdcf9545 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -18,6 +18,7 @@ package types import ( "bytes" + "hash" "math/big" "reflect" "testing" @@ -27,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/crypto/sha3" ) // from bcValidBlockTest.json, "SimpleTx" @@ -90,6 +92,30 @@ func BenchmarkEncodeBlock(b *testing.B) { } } +// testHasher is the helper tool for transaction/receipt list hashing. 
+// The original hasher is trie, in order to get rid of import cycle, +// use the testing hasher instead. +type testHasher struct { + hasher hash.Hash +} + +func newHasher() *testHasher { + return &testHasher{hasher: sha3.NewLegacyKeccak256()} +} + +func (h *testHasher) Reset() { + h.hasher.Reset() +} + +func (h *testHasher) Update(key, val []byte) { + h.hasher.Write(key) + h.hasher.Write(val) +} + +func (h *testHasher) Hash() common.Hash { + return common.BytesToHash(h.hasher.Sum(nil)) +} + func makeBenchBlock() *Block { var ( key, _ = crypto.GenerateKey() @@ -128,5 +154,5 @@ func makeBenchBlock() *Block { Extra: []byte("benchmark uncle"), } } - return NewBlock(header, txs, uncles, receipts) + return NewBlock(header, txs, uncles, receipts, newHasher()) } diff --git a/core/types/derive_sha.go b/core/types/derive_sha.go index 00c42c5bc6..7d40c7f660 100644 --- a/core/types/derive_sha.go +++ b/core/types/derive_sha.go @@ -21,21 +21,28 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/trie" ) +// DerivableList is the interface which can derive the hash. type DerivableList interface { Len() int GetRlp(i int) []byte } -func DeriveSha(list DerivableList) common.Hash { +// Hasher is the tool used to calculate the hash of derivable list. +type Hasher interface { + Reset() + Update([]byte, []byte) + Hash() common.Hash +} + +func DeriveSha(list DerivableList, hasher Hasher) common.Hash { + hasher.Reset() keybuf := new(bytes.Buffer) - trie := new(trie.Trie) for i := 0; i < list.Len(); i++ { keybuf.Reset() rlp.Encode(keybuf, uint(i)) - trie.Update(keybuf.Bytes(), list.GetRlp(i)) + hasher.Update(keybuf.Bytes(), list.GetRlp(i)) } - return trie.Hash() + return hasher.Hash() } diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index f3b0926d77..59b5abaa60 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -109,7 +109,7 @@ type Downloader struct { peers *peerSet // Set of active peers from which download can proceed stateDB ethdb.Database // Database to state sync into (and deduplicate via) - stateBloom *trie.SyncBloom // Bloom filter for fast trie node existence checks + stateBloom *trie.SyncBloom // Bloom filter for fast trie node and contract code existence checks // Statistics syncStatsChainOrigin uint64 // Origin block number where syncing started at diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 87225cb625..aba4d5dbf7 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/trie" ) const ( @@ -771,7 +772,7 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLi q.lock.Lock() defer q.lock.Unlock() validate := func(index int, header *types.Header) error { - if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash { + if types.DeriveSha(types.Transactions(txLists[index]), new(trie.Trie)) != header.TxHash { return errInvalidBody } if types.CalcUncleHash(uncleLists[index]) != header.UncleHash { @@ -796,7 +797,7 @@ func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, q.lock.Lock() defer q.lock.Unlock() validate := func(index int, header *types.Header) error { - if types.DeriveSha(types.Receipts(receiptList[index])) != header.ReceiptHash { + if types.DeriveSha(types.Receipts(receiptList[index]), 
new(trie.Trie)) != header.ReceiptHash { return errInvalidReceipt } return nil diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go index 25c8fccb5b..bf9e96fe2a 100644 --- a/eth/downloader/statesync.go +++ b/eth/downloader/statesync.go @@ -474,7 +474,7 @@ func (s *stateSync) process(req *stateReq) (int, error) { // Iterate over all the delivered data and inject one-by-one into the trie for _, blob := range req.response { - _, hash, err := s.processNodeData(blob) + hash, err := s.processNodeData(blob) switch err { case nil: s.numUncommitted++ @@ -512,13 +512,13 @@ func (s *stateSync) process(req *stateReq) (int, error) { // processNodeData tries to inject a trie node data blob delivered from a remote // peer into the state trie, returning whether anything useful was written or any // error occurred. -func (s *stateSync) processNodeData(blob []byte) (bool, common.Hash, error) { +func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) { res := trie.SyncResult{Data: blob} s.keccak.Reset() s.keccak.Write(blob) s.keccak.Sum(res.Hash[:0]) - committed, _, err := s.sched.Process([]trie.SyncResult{res}) - return committed, res.Hash, err + err := s.sched.Process(res) + return res.Hash, err } // updateStats bumps the various state sync progress counters and displays a log diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index 2c2dabad96..270aaf5918 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/trie" ) const ( @@ -540,7 +541,7 @@ func (f *BlockFetcher) loop() { announce.time = task.time // If the block is empty (header only), short circuit into the final import queue - if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) { + if header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash { log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash()) block := types.NewBlockWithHeader(header) @@ -619,7 +620,7 @@ func (f *BlockFetcher) loop() { continue } if txnHash == (common.Hash{}) { - txnHash = types.DeriveSha(types.Transactions(task.transactions[i])) + txnHash = types.DeriveSha(types.Transactions(task.transactions[i]), new(trie.Trie)) } if txnHash != announce.header.TxHash { continue diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index a6854ffcf8..3220002a99 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) var ( @@ -38,7 +39,7 @@ var ( testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") testAddress = crypto.PubkeyToAddress(testKey.PublicKey) genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000)) - unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit}, nil, nil, nil) + unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit}, nil, nil, nil, new(trie.Trie)) ) // makeChain creates a chain of n blocks starting at and including parent. 
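For readers following the DeriveSha refactor above: the hash of a derivable list is now computed against an explicitly supplied Hasher, with new(trie.Trie) providing the canonical trie-backed implementation at the eth/downloader and eth/fetcher call sites. A minimal sketch of a caller in that style follows; validateBody is a hypothetical helper written for illustration only and is not part of the patch.

package example // illustration only, not part of the patch

import (
	"errors"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

// validateBody checks a downloaded block body against its header in the style
// of the updated call sites: DeriveSha takes the hasher explicitly, and
// new(trie.Trie) supplies the trie-based one.
func validateBody(header *types.Header, txs types.Transactions, uncles []*types.Header) error {
	if types.DeriveSha(txs, new(trie.Trie)) != header.TxHash {
		return errors.New("transaction root mismatch")
	}
	if types.CalcUncleHash(uncles) != header.UncleHash {
		return errors.New("uncle hash mismatch")
	}
	return nil
}

Making the hasher an argument lets core/types drop its import of the trie package (see the derive_sha.go hunk above), which is also what allows the lightweight testHasher in block_test.go to stand in for a real trie in tests and benchmarks.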
diff --git a/eth/handler.go b/eth/handler.go index 1a15765dda..3a051abf5e 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -608,7 +608,14 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { return errResp(ErrDecode, "msg %v: %v", msg, err) } // Retrieve the requested state entry, stopping if enough was found - if entry, err := pm.blockchain.TrieNode(hash); err == nil { + // todo now the code and trienode is mixed in the protocol level, + // separate these two types. + entry, err := pm.blockchain.TrieNode(hash) + if len(entry) == 0 || err != nil { + // Read the contract code with prefix only to save unnecessary lookups. + entry, err = pm.blockchain.ContractCodeWithPrefix(hash) + } + if err == nil && len(entry) > 0 { data = append(data, entry) bytes += len(entry) } @@ -703,7 +710,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { log.Warn("Propagated block has invalid uncles", "have", hash, "exp", request.Block.UncleHash()) break // TODO(karalabe): return error eventually, but wait a few releases } - if hash := types.DeriveSha(request.Block.Transactions()); hash != request.Block.TxHash() { + if hash := types.DeriveSha(request.Block.Transactions(), new(trie.Trie)); hash != request.Block.TxHash() { log.Warn("Propagated block has invalid body", "have", hash, "exp", request.Block.TxHash()) break // TODO(karalabe): return error eventually, but wait a few releases } diff --git a/les/odr_requests.go b/les/odr_requests.go index 8c1e0102f5..3cc55c98d8 100644 --- a/les/odr_requests.go +++ b/les/odr_requests.go @@ -116,7 +116,7 @@ func (r *BlockRequest) Validate(db ethdb.Database, msg *Msg) error { if r.Header == nil { return errHeaderUnavailable } - if r.Header.TxHash != types.DeriveSha(types.Transactions(body.Transactions)) { + if r.Header.TxHash != types.DeriveSha(types.Transactions(body.Transactions), new(trie.Trie)) { return errTxHashMismatch } if r.Header.UncleHash != types.CalcUncleHash(body.Uncles) { @@ -174,7 +174,7 @@ func (r *ReceiptsRequest) Validate(db ethdb.Database, msg *Msg) error { if r.Header == nil { return errHeaderUnavailable } - if r.Header.ReceiptHash != types.DeriveSha(receipt) { + if r.Header.ReceiptHash != types.DeriveSha(receipt, new(trie.Trie)) { return errReceiptHashMismatch } // Validations passed, store and return diff --git a/les/server_handler.go b/les/server_handler.go index c474363232..463f51cb43 100644 --- a/les/server_handler.go +++ b/les/server_handler.go @@ -489,7 +489,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { p.bumpInvalid() continue } - code, err := triedb.Node(common.BytesToHash(account.CodeHash)) + code, err := h.blockchain.StateCache().ContractCode(common.BytesToHash(request.AccKey), common.BytesToHash(account.CodeHash)) if err != nil { p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "codehash", common.BytesToHash(account.CodeHash), "err", err) continue diff --git a/light/odr.go b/light/odr.go index 1ea98ca5aa..0b854b0b6c 100644 --- a/light/odr.go +++ b/light/odr.go @@ -101,7 +101,7 @@ type CodeRequest struct { // StoreResult stores the retrieved data in local database func (req *CodeRequest) StoreResult(db ethdb.Database) { - db.Put(req.Hash[:], req.Data) + rawdb.WriteCode(db, req.Hash, req.Data) } // BlockRequest is the ODR request type for retrieving block bodies diff --git a/light/odr_test.go b/light/odr_test.go index 78bf373e60..5f7f4d96cb 100644 --- a/light/odr_test.go +++ b/light/odr_test.go @@ -87,7 
+87,7 @@ func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error { t.Prove(req.Key, 0, nodes) req.Proof = nodes case *CodeRequest: - req.Data, _ = odr.sdb.Get(req.Hash[:]) + req.Data = rawdb.ReadCode(odr.sdb, req.Hash) } req.StoreResult(odr.ldb) return nil diff --git a/light/trie.go b/light/trie.go index 0d69e74e21..3eb05f4a3f 100644 --- a/light/trie.go +++ b/light/trie.go @@ -22,6 +22,7 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -70,7 +71,8 @@ func (db *odrDatabase) ContractCode(addrHash, codeHash common.Hash) ([]byte, err if codeHash == sha3Nil { return nil, nil } - if code, err := db.backend.Database().Get(codeHash[:]); err == nil { + code := rawdb.ReadCode(db.backend.Database(), codeHash) + if len(code) != 0 { return code, nil } id := *db.id diff --git a/miner/worker.go b/miner/worker.go index 177e727283..f042fd8e33 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) const ( @@ -711,6 +712,7 @@ func (w *worker) updateSnapshot() { w.current.txs, uncles, w.current.receipts, + new(trie.Trie), ) w.snapshotState = w.current.state.Copy() diff --git a/trie/database.go b/trie/database.go index 0e9f306e63..fa8906b7a3 100644 --- a/trie/database.go +++ b/trie/database.go @@ -27,6 +27,7 @@ import ( "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -57,15 +58,6 @@ var ( memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil) ) -// secureKeyPrefix is the database key prefix used to store trie node preimages. -var secureKeyPrefix = []byte("secure-key-") - -// secureKeyPrefixLength is the length of the above prefix -const secureKeyPrefixLength = 11 - -// secureKeyLength is the length of the above prefix + 32byte hash. -const secureKeyLength = secureKeyPrefixLength + 32 - // Database is an intermediate write layer between the trie data structures and // the disk database. The aim is to accumulate trie writes in-memory and only // periodically flush a couple tries to disk, garbage collecting the remainder. @@ -78,7 +70,7 @@ type Database struct { diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs - dirties map[common.Hash]*cachedNode // Data and references relationships of dirty nodes + dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes oldest common.Hash // Oldest tracked node, flush-list head newest common.Hash // Newest tracked node, flush-list tail @@ -139,8 +131,8 @@ type rawShortNode struct { func (n rawShortNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") } -// cachedNode is all the information we know about a single cached node in the -// memory database write layer. +// cachedNode is all the information we know about a single cached trie node +// in the memory database write layer. 
type cachedNode struct { node node // Cached collapsed trie node, or raw rlp data size uint16 // Byte size of the useful cached data @@ -161,8 +153,8 @@ var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size()) // reference map. const cachedNodeChildrenSize = 48 -// rlp returns the raw rlp encoded blob of the cached node, either directly from -// the cache, or by regenerating it from the collapsed node. +// rlp returns the raw rlp encoded blob of the cached trie node, either directly +// from the cache, or by regenerating it from the collapsed node. func (n *cachedNode) rlp() []byte { if node, ok := n.node.(rawNode); ok { return node @@ -183,9 +175,9 @@ func (n *cachedNode) obj(hash common.Hash) node { return expandNode(hash[:], n.node) } -// forChilds invokes the callback for all the tracked children of this node, -// both the implicit ones from inside the node as well as the explicit ones -//from outside the node. +// forChilds invokes the callback for all the tracked children of this node, +// both the implicit ones from inside the node as well as the explicit ones +// from outside the node. func (n *cachedNode) forChilds(onChild func(hash common.Hash)) { for child := range n.children { onChild(child) @@ -305,25 +297,14 @@ func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int, journal string) } // DiskDB retrieves the persistent storage backing the trie database. -func (db *Database) DiskDB() ethdb.KeyValueReader { +func (db *Database) DiskDB() ethdb.KeyValueStore { return db.diskdb } -// InsertBlob writes a new reference tracked blob to the memory database if it's -// yet unknown. This method should only be used for non-trie nodes that require -// reference counting, since trie nodes are garbage collected directly through -// their embedded children. -func (db *Database) InsertBlob(hash common.Hash, blob []byte) { - db.lock.Lock() - defer db.lock.Unlock() - - db.insert(hash, len(blob), rawNode(blob)) -} - -// insert inserts a collapsed trie node into the memory database. This method is -// a more generic version of InsertBlob, supporting both raw blob insertions as -// well ex trie node insertions. The blob size must be specified to allow proper -// size tracking. +// insert inserts a collapsed trie node into the memory database. +// The blob size must be specified to allow proper size tracking. +// All nodes inserted by this function will be reference tracked +// and in theory should only used for **trie nodes** insertion. func (db *Database) insert(hash common.Hash, size int, node node) { // If the node's already cached, skip if _, ok := db.dirties[hash]; ok { @@ -430,39 +411,30 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) { memcacheDirtyMissMeter.Mark(1) // Content unavailable in memory, attempt to retrieve from disk - enc, err := db.diskdb.Get(hash[:]) - if err == nil && enc != nil { + enc := rawdb.ReadTrieNode(db.diskdb, hash) + if len(enc) != 0 { if db.cleans != nil { db.cleans.Set(hash[:], enc) memcacheCleanMissMeter.Mark(1) memcacheCleanWriteMeter.Mark(int64(len(enc))) } + return enc, nil } - return enc, err + return nil, errors.New("not found") } // preimage retrieves a cached trie node pre-image from memory. If it cannot be // found cached, the method queries the persistent database for the content. 
-func (db *Database) preimage(hash common.Hash) ([]byte, error) { +func (db *Database) preimage(hash common.Hash) []byte { // Retrieve the node from cache if available db.lock.RLock() preimage := db.preimages[hash] db.lock.RUnlock() if preimage != nil { - return preimage, nil + return preimage } - // Content unavailable in memory, attempt to retrieve from disk - return db.diskdb.Get(secureKey(hash)) -} - -// secureKey returns the database key for the preimage of key (as a newly -// allocated byte-slice) -func secureKey(hash common.Hash) []byte { - buf := make([]byte, secureKeyLength) - copy(buf, secureKeyPrefix) - copy(buf[secureKeyPrefixLength:], hash[:]) - return buf + return rawdb.ReadPreimage(db.diskdb, hash) } // Nodes retrieves the hashes of all the nodes cached within the memory database. @@ -482,6 +454,9 @@ func (db *Database) Nodes() []common.Hash { } // Reference adds a new reference from a parent node to a child node. +// This function is used to add reference between internal trie node +// and external node(e.g. storage trie root), all internal trie nodes +// are referenced together by database itself. func (db *Database) Reference(child common.Hash, parent common.Hash) { db.lock.Lock() defer db.lock.Unlock() @@ -604,27 +579,16 @@ func (db *Database) Cap(limit common.StorageSize) error { size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize) size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2)) - // We reuse an ephemeral buffer for the keys. The batch Put operation - // copies it internally, so we can reuse it. - var keyBuf [secureKeyLength]byte - copy(keyBuf[:], secureKeyPrefix) - // If the preimage cache got large enough, push to disk. If it's still small // leave for later to deduplicate writes. flushPreimages := db.preimagesSize > 4*1024*1024 if flushPreimages { - for hash, preimage := range db.preimages { - copy(keyBuf[secureKeyPrefixLength:], hash[:]) - if err := batch.Put(keyBuf[:], preimage); err != nil { - log.Error("Failed to commit preimage from trie database", "err", err) + rawdb.WritePreimages(batch, db.preimages) + if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { return err } - if batch.ValueSize() > ethdb.IdealBatchSize { - if err := batch.Write(); err != nil { - return err - } - batch.Reset() - } + batch.Reset() } } // Keep committing nodes from the flush-list until we're below allowance @@ -632,9 +596,8 @@ func (db *Database) Cap(limit common.StorageSize) error { for size > limit && oldest != (common.Hash{}) { // Fetch the oldest referenced node and push into the batch node := db.dirties[oldest] - if err := batch.Put(oldest[:], node.rlp()); err != nil { - return err - } + rawdb.WriteTrieNode(batch, oldest, node.rlp()) + // If we exceeded the ideal batch size, commit and reset if batch.ValueSize() >= ethdb.IdealBatchSize { if err := batch.Write(); err != nil { @@ -662,8 +625,7 @@ func (db *Database) Cap(limit common.StorageSize) error { defer db.lock.Unlock() if flushPreimages { - db.preimages = make(map[common.Hash][]byte) - db.preimagesSize = 0 + db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0 } for db.oldest != oldest { node := db.dirties[db.oldest] @@ -706,25 +668,13 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H start := time.Now() batch := db.diskdb.NewBatch() - // We reuse an ephemeral buffer for the keys. The batch Put operation - // copies it internally, so we can reuse it. 
- var keyBuf [secureKeyLength]byte - copy(keyBuf[:], secureKeyPrefix) - // Move all of the accumulated preimages into a write batch - for hash, preimage := range db.preimages { - copy(keyBuf[secureKeyPrefixLength:], hash[:]) - if err := batch.Put(keyBuf[:], preimage); err != nil { - log.Error("Failed to commit preimage from trie database", "err", err) + rawdb.WritePreimages(batch, db.preimages) + if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { return err } - // If the batch is too large, flush to disk - if batch.ValueSize() > ethdb.IdealBatchSize { - if err := batch.Write(); err != nil { - return err - } - batch.Reset() - } + batch.Reset() } // Since we're going to replay trie node writes into the clean cache, flush out // any batched pre-images before continuing. @@ -754,8 +704,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H batch.Reset() // Reset the storage counters and bumpd metrics - db.preimages = make(map[common.Hash][]byte) - db.preimagesSize = 0 + db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0 memcacheCommitTimeTimer.Update(time.Since(start)) memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize)) @@ -791,13 +740,11 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane if err != nil { return err } - if err := batch.Put(hash[:], node.rlp()); err != nil { - return err - } + // If we've reached an optimal batch size, commit and start over + rawdb.WriteTrieNode(batch, hash, node.rlp()) if callback != nil { callback(hash) } - // If we've reached an optimal batch size, commit and start over if batch.ValueSize() >= ethdb.IdealBatchSize { if err := batch.Write(); err != nil { return err diff --git a/trie/secure_trie.go b/trie/secure_trie.go index bd8e51d989..ae1bbc6aa9 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -130,8 +130,7 @@ func (t *SecureTrie) GetKey(shaKey []byte) []byte { if key, ok := t.getSecKeyCache()[string(shaKey)]; ok { return key } - key, _ := t.trie.db.preimage(common.BytesToHash(shaKey)) - return key + return t.trie.db.preimage(common.BytesToHash(shaKey)) } // Commit writes all nodes and the secure hash pre-images to the trie's database. diff --git a/trie/sync.go b/trie/sync.go index 620e97fa30..af99466416 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -22,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" ) @@ -37,7 +38,7 @@ var ErrAlreadyProcessed = errors.New("already processed") type request struct { hash common.Hash // Hash of the node data content to retrieve data []byte // Data content of the node, cached until all subtrees complete - raw bool // Whether this is a raw entry (code) or a trie node + code bool // Whether this is a code entry parents []*request // Parent state nodes referencing this entry (notify all upon completion) depth int // Depth level within the trie the node is located to prioritise DFS @@ -46,8 +47,7 @@ type request struct { callback LeafCallback // Callback to invoke if a leaf node it reached on this branch } -// SyncResult is a simple list to return missing nodes along with their request -// hashes. +// SyncResult is a response with requested data along with it's hash. 
type SyncResult struct { Hash common.Hash // Hash of the originally unknown trie node Data []byte // Data content of the retrieved node @@ -56,25 +56,40 @@ type SyncResult struct { // syncMemBatch is an in-memory buffer of successfully downloaded but not yet // persisted data items. type syncMemBatch struct { - batch map[common.Hash][]byte // In-memory membatch of recently completed items + nodes map[common.Hash][]byte // In-memory membatch of recently completed nodes + codes map[common.Hash][]byte // In-memory membatch of recently completed codes } // newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes. func newSyncMemBatch() *syncMemBatch { return &syncMemBatch{ - batch: make(map[common.Hash][]byte), + nodes: make(map[common.Hash][]byte), + codes: make(map[common.Hash][]byte), } } +// hasNode reports the trie node with specific hash is already cached. +func (batch *syncMemBatch) hasNode(hash common.Hash) bool { + _, ok := batch.nodes[hash] + return ok +} + +// hasCode reports the contract code with specific hash is already cached. +func (batch *syncMemBatch) hasCode(hash common.Hash) bool { + _, ok := batch.codes[hash] + return ok +} + // Sync is the main state trie synchronisation scheduler, which provides yet // unknown trie hashes to retrieve, accepts node data associated with said hashes // and reconstructs the trie step by step until all is done. type Sync struct { database ethdb.KeyValueReader // Persistent database to check for existing entries membatch *syncMemBatch // Memory buffer to avoid frequent database writes - requests map[common.Hash]*request // Pending requests pertaining to a key hash + nodeReqs map[common.Hash]*request // Pending requests pertaining to a trie node hash + codeReqs map[common.Hash]*request // Pending requests pertaining to a code hash queue *prque.Prque // Priority queue with the pending requests - bloom *SyncBloom // Bloom filter for fast node existence checks + bloom *SyncBloom // Bloom filter for fast state existence checks } // NewSync creates a new trie data download scheduler. @@ -82,7 +97,8 @@ func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallb ts := &Sync{ database: database, membatch: newSyncMemBatch(), - requests: make(map[common.Hash]*request), + nodeReqs: make(map[common.Hash]*request), + codeReqs: make(map[common.Hash]*request), queue: prque.New(nil), bloom: bloom, } @@ -96,13 +112,15 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb if root == emptyRoot { return } - if _, ok := s.membatch.batch[root]; ok { + if s.membatch.hasNode(root) { return } if s.bloom == nil || s.bloom.Contains(root[:]) { - // Bloom filter says this might be a duplicate, double check - blob, _ := s.database.Get(root[:]) - if local, err := decodeNode(root[:], blob); local != nil && err == nil { + // Bloom filter says this might be a duplicate, double check. + // If database says yes, then at least the trie node is present + // and we hold the assumption that it's NOT legacy contract code. 
+ blob := rawdb.ReadTrieNode(s.database, root) + if len(blob) > 0 { return } // False positive, bump fault meter @@ -116,7 +134,7 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb } // If this sub-trie has a designated parent, link them together if parent != (common.Hash{}) { - ancestor := s.requests[parent] + ancestor := s.nodeReqs[parent] if ancestor == nil { panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent)) } @@ -126,21 +144,25 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb s.schedule(req) } -// AddRawEntry schedules the direct retrieval of a state entry that should not be -// interpreted as a trie node, but rather accepted and stored into the database -// as is. This method's goal is to support misc state metadata retrievals (e.g. -// contract code). -func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) { +// AddCodeEntry schedules the direct retrieval of a contract code that should not +// be interpreted as a trie node, but rather accepted and stored into the database +// as is. +func (s *Sync) AddCodeEntry(hash common.Hash, depth int, parent common.Hash) { // Short circuit if the entry is empty or already known if hash == emptyState { return } - if _, ok := s.membatch.batch[hash]; ok { + if s.membatch.hasCode(hash) { return } if s.bloom == nil || s.bloom.Contains(hash[:]) { - // Bloom filter says this might be a duplicate, double check - if ok, _ := s.database.Has(hash[:]); ok { + // Bloom filter says this might be a duplicate, double check. + // If database says yes, the blob is present for sure. + // Note we only check the existence with new code scheme, fast + // sync is expected to run with a fresh new node. Even there + // exists the code with legacy format, fetch and store with + // new scheme anyway. + if blob := rawdb.ReadCodeWithPrefix(s.database, hash); len(blob) > 0 { return } // False positive, bump fault meter @@ -149,12 +171,12 @@ func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) { // Assemble the new sub-trie sync request req := &request{ hash: hash, - raw: true, + code: true, depth: depth, } // If this sub-trie has a designated parent, link them together if parent != (common.Hash{}) { - ancestor := s.requests[parent] + ancestor := s.nodeReqs[parent] // the parent of codereq can ONLY be nodereq if ancestor == nil { panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent)) } @@ -173,61 +195,64 @@ func (s *Sync) Missing(max int) []common.Hash { return requests } -// Process injects a batch of retrieved trie nodes data, returning if something -// was committed to the database and also the index of an entry if its processing -// failed. -func (s *Sync) Process(results []SyncResult) (bool, int, error) { - committed := false - - for i, item := range results { - // If the item was not requested, bail out - request := s.requests[item.Hash] - if request == nil { - return committed, i, ErrNotRequested - } - if request.data != nil { - return committed, i, ErrAlreadyProcessed - } - // If the item is a raw entry request, commit directly - if request.raw { - request.data = item.Data - s.commit(request) - committed = true - continue - } +// Process injects the received data for requested item. Note it can +// happpen that the single response commits two pending requests(e.g. +// there are two requests one for code and one for node but the hash +// is same). 
In this case the second response for the same hash will +// be treated as "non-requested" item or "already-processed" item but +// there is no downside. +func (s *Sync) Process(result SyncResult) error { + // If the item was not requested either for code or node, bail out + if s.nodeReqs[result.Hash] == nil && s.codeReqs[result.Hash] == nil { + return ErrNotRequested + } + // There is an pending code request for this data, commit directly + var filled bool + if req := s.codeReqs[result.Hash]; req != nil && req.data == nil { + filled = true + req.data = result.Data + s.commit(req) + } + // There is an pending node request for this data, fill it. + if req := s.nodeReqs[result.Hash]; req != nil && req.data == nil { + filled = true // Decode the node data content and update the request - node, err := decodeNode(item.Hash[:], item.Data) + node, err := decodeNode(result.Hash[:], result.Data) if err != nil { - return committed, i, err + return err } - request.data = item.Data + req.data = result.Data // Create and schedule a request for all the children nodes - requests, err := s.children(request, node) + requests, err := s.children(req, node) if err != nil { - return committed, i, err - } - if len(requests) == 0 && request.deps == 0 { - s.commit(request) - committed = true - continue + return err } - request.deps += len(requests) - for _, child := range requests { - s.schedule(child) + if len(requests) == 0 && req.deps == 0 { + s.commit(req) + } else { + req.deps += len(requests) + for _, child := range requests { + s.schedule(child) + } } } - return committed, 0, nil + if !filled { + return ErrAlreadyProcessed + } + return nil } // Commit flushes the data stored in the internal membatch out to persistent // storage, returning any occurred error. func (s *Sync) Commit(dbw ethdb.Batch) error { // Dump the membatch into a database dbw - for key, value := range s.membatch.batch { - if err := dbw.Put(key[:], value); err != nil { - return err - } + for key, value := range s.membatch.nodes { + rawdb.WriteTrieNode(dbw, key, value) + s.bloom.Add(key[:]) + } + for key, value := range s.membatch.codes { + rawdb.WriteCode(dbw, key, value) s.bloom.Add(key[:]) } // Drop the membatch data and return @@ -237,21 +262,30 @@ func (s *Sync) Commit(dbw ethdb.Batch) error { // Pending returns the number of state entries currently pending for download. func (s *Sync) Pending() int { - return len(s.requests) + return len(s.nodeReqs) + len(s.codeReqs) } // schedule inserts a new state retrieval request into the fetch queue. If there // is already a pending request for this node, the new request will be discarded // and only a parent reference added to the old one. func (s *Sync) schedule(req *request) { + var reqset = s.nodeReqs + if req.code { + reqset = s.codeReqs + } // If we're already requesting this node, add a new reference and stop - if old, ok := s.requests[req.hash]; ok { + if old, ok := reqset[req.hash]; ok { old.parents = append(old.parents, req.parents...) return } - // Schedule the request for future retrieval + reqset[req.hash] = req + + // Schedule the request for future retrieval. This queue is shared + // by both node requests and code requests. It can happen that there + // is a trie node and code has same hash. In this case two elements + // with same hash and same or different depth will be pushed. But it's + // ok the worst case is the second response will be treated as duplicated. 
s.queue.Push(req.hash, int64(req.depth)) - s.requests[req.hash] = req } // children retrieves all the missing children of a state trie entry for future @@ -297,12 +331,14 @@ func (s *Sync) children(req *request, object node) ([]*request, error) { if node, ok := (child.node).(hashNode); ok { // Try to resolve the node from the local database hash := common.BytesToHash(node) - if _, ok := s.membatch.batch[hash]; ok { + if s.membatch.hasNode(hash) { continue } if s.bloom == nil || s.bloom.Contains(node) { - // Bloom filter says this might be a duplicate, double check - if ok, _ := s.database.Has(node); ok { + // Bloom filter says this might be a duplicate, double check. + // If database says yes, then at least the trie node is present + // and we hold the assumption that it's NOT legacy contract code. + if blob := rawdb.ReadTrieNode(s.database, common.BytesToHash(node)); len(blob) > 0 { continue } // False positive, bump fault meter @@ -325,10 +361,13 @@ func (s *Sync) children(req *request, object node) ([]*request, error) { // committed themselves. func (s *Sync) commit(req *request) (err error) { // Write the node content to the membatch - s.membatch.batch[req.hash] = req.data - - delete(s.requests, req.hash) - + if req.code { + s.membatch.codes[req.hash] = req.data + delete(s.codeReqs, req.hash) + } else { + s.membatch.nodes[req.hash] = req.data + delete(s.nodeReqs, req.hash) + } // Check all parents for completion for _, parent := range req.parents { parent.deps-- diff --git a/trie/sync_bloom.go b/trie/sync_bloom.go index 3108b05935..89f61d66d9 100644 --- a/trie/sync_bloom.go +++ b/trie/sync_bloom.go @@ -25,6 +25,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -41,8 +42,8 @@ var ( ) // syncBloomHasher is a wrapper around a byte blob to satisfy the interface API -// requirements of the bloom library used. It's used to convert a trie hash into -// a 64 bit mini hash. +// requirements of the bloom library used. It's used to convert a trie hash or +// contract code hash into a 64 bit mini hash. type syncBloomHasher []byte func (f syncBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") } @@ -53,9 +54,9 @@ func (f syncBloomHasher) Size() int { return 8 } func (f syncBloomHasher) Sum64() uint64 { return binary.BigEndian.Uint64(f) } // SyncBloom is a bloom filter used during fast sync to quickly decide if a trie -// node already exists on disk or not. It self populates from the provided disk -// database on creation in a background thread and will only start returning live -// results once that's finished. +// node or contract code already exists on disk or not. It self populates from the +// provided disk database on creation in a background thread and will only start +// returning live results once that's finished. 
type SyncBloom struct { bloom *bloomfilter.Filter inited uint32 @@ -107,10 +108,16 @@ func (b *SyncBloom) init(database ethdb.Iteratee) { ) for it.Next() && atomic.LoadUint32(&b.closed) == 0 { // If the database entry is a trie node, add it to the bloom - if key := it.Key(); len(key) == common.HashLength { + key := it.Key() + if len(key) == common.HashLength { b.bloom.Add(syncBloomHasher(key)) bloomLoadMeter.Mark(1) } + // If the database entry is a contract code, add it to the bloom + if ok, hash := rawdb.IsCodeKey(key); ok { + b.bloom.Add(syncBloomHasher(hash)) + bloomLoadMeter.Mark(1) + } // If enough time elapsed since the last iterator swap, restart if time.Since(swap) > 8*time.Second { key := common.CopyBytes(it.Key()) diff --git a/trie/sync_test.go b/trie/sync_test.go index 6025b87fcc..34f3990576 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -124,8 +124,10 @@ func testIterativeSync(t *testing.T, count int) { } results[i] = SyncResult{hash, data} } - if _, index, err := sched.Process(results); err != nil { - t.Fatalf("failed to process result #%d: %v", index, err) + for _, result := range results { + if err := sched.Process(result); err != nil { + t.Fatalf("failed to process result %v", err) + } } batch := diskdb.NewBatch() if err := sched.Commit(batch); err != nil { @@ -160,8 +162,10 @@ func TestIterativeDelayedSync(t *testing.T) { } results[i] = SyncResult{hash, data} } - if _, index, err := sched.Process(results); err != nil { - t.Fatalf("failed to process result #%d: %v", index, err) + for _, result := range results { + if err := sched.Process(result); err != nil { + t.Fatalf("failed to process result %v", err) + } } batch := diskdb.NewBatch() if err := sched.Commit(batch); err != nil { @@ -204,8 +208,10 @@ func testIterativeRandomSync(t *testing.T, count int) { results = append(results, SyncResult{hash, data}) } // Feed the retrieved results back and queue new tasks - if _, index, err := sched.Process(results); err != nil { - t.Fatalf("failed to process result #%d: %v", index, err) + for _, result := range results { + if err := sched.Process(result); err != nil { + t.Fatalf("failed to process result %v", err) + } } batch := diskdb.NewBatch() if err := sched.Commit(batch); err != nil { @@ -251,8 +257,10 @@ func TestIterativeRandomDelayedSync(t *testing.T) { } } // Feed the retrieved results back and queue new tasks - if _, index, err := sched.Process(results); err != nil { - t.Fatalf("failed to process result #%d: %v", index, err) + for _, result := range results { + if err := sched.Process(result); err != nil { + t.Fatalf("failed to process result %v", err) + } } batch := diskdb.NewBatch() if err := sched.Commit(batch); err != nil { @@ -298,8 +306,10 @@ func TestDuplicateAvoidanceSync(t *testing.T) { results[i] = SyncResult{hash, data} } - if _, index, err := sched.Process(results); err != nil { - t.Fatalf("failed to process result #%d: %v", index, err) + for _, result := range results { + if err := sched.Process(result); err != nil { + t.Fatalf("failed to process result %v", err) + } } batch := diskdb.NewBatch() if err := sched.Commit(batch); err != nil { @@ -336,8 +346,10 @@ func TestIncompleteSync(t *testing.T) { results[i] = SyncResult{hash, data} } // Process each of the trie nodes - if _, index, err := sched.Process(results); err != nil { - t.Fatalf("failed to process result #%d: %v", index, err) + for _, result := range results { + if err := sched.Process(result); err != nil { + t.Fatalf("failed to process result %v", err) + } } batch := diskdb.NewBatch() if 
err := sched.Commit(batch); err != nil { diff --git a/trie/trie.go b/trie/trie.go index 78e2eff534..26c3f2c29b 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -473,3 +473,9 @@ func (t *Trie) hashRoot(db *Database) (node, node, error) { t.unhashed = 0 return hashed, cached, nil } + +// Reset drops the referenced root node and cleans all internal state. +func (t *Trie) Reset() { + t.root = nil + t.unhashed = 0 +} From 1b5a867eec711d83abfda18f7083f0c64a50f8b2 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Sat, 22 Aug 2020 18:12:04 +0200 Subject: [PATCH 012/105] core: do less lookups when writing fast-sync block bodies (#21468) --- core/blockchain.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 8434d2193f..948242ed89 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1277,6 +1277,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ } // writeLive writes blockchain and corresponding receipt chain into active store. writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { + skipPresenceCheck := false batch := bc.db.NewBatch() for i, block := range blockChain { // Short circuit insertion if shutting down or processing failed @@ -1287,9 +1288,17 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ if !bc.HasHeader(block.Hash(), block.NumberU64()) { return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) } - if bc.HasBlock(block.Hash(), block.NumberU64()) { - stats.ignored++ - continue + if !skipPresenceCheck { + // Ignore if the entire data is already known + if bc.HasBlock(block.Hash(), block.NumberU64()) { + stats.ignored++ + continue + } else { + // If block N is not present, neither are the later blocks. + // This should be true, but if we are mistaken, the shortcut + // here will only cause overwriting of some existing data + skipPresenceCheck = true + } } // Write all the data out into the database rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()) From 0f4e7c9b0d570ff7f79b0765a0bd3737ce635e9a Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Mon, 24 Aug 2020 10:32:12 +0200 Subject: [PATCH 013/105] eth: utilize sync bloom for getNodeData (#21445) * eth/downloader, eth/handler: utilize sync bloom for getNodeData * trie: handle if bloom is nil * trie, downloader: check bloom nilness externally --- eth/downloader/downloader.go | 9 +++++++++ eth/handler.go | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 59b5abaa60..4c5b270b7c 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -283,6 +283,15 @@ func (d *Downloader) Synchronising() bool { return atomic.LoadInt32(&d.synchronising) > 0 } +// SyncBloomContains tests if the syncbloom filter contains the given hash: +// - false: the bloom definitely does not contain hash +// - true: the bloom maybe contains hash +// +// While the bloom is being initialized (or is closed), all queries will return true. +func (d *Downloader) SyncBloomContains(hash []byte) bool { + return d.stateBloom == nil || d.stateBloom.Contains(hash) +} + // RegisterPeer injects a new download peer into the set of block source to be // used for fetching hashes and blocks from. 
func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error { diff --git a/eth/handler.go b/eth/handler.go index 3a051abf5e..7482a2f96e 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -610,6 +610,10 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { // Retrieve the requested state entry, stopping if enough was found // todo now the code and trienode is mixed in the protocol level, // separate these two types. + if !pm.downloader.SyncBloomContains(hash[:]) { + // Only lookup the trie node if there's chance that we actually have it + continue + } entry, err := pm.blockchain.TrieNode(hash) if len(entry) == 0 || err != nil { // Read the contract code with prefix only to save unnecessary lookups. From 3ee91b9f2e400eee382f0f1a26b6fe233c4c3f9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 24 Aug 2020 13:22:36 +0300 Subject: [PATCH 014/105] core/state/snapshot: reduce disk layer depth during generation --- core/state/snapshot/generate.go | 18 ++++++++++-------- core/state/snapshot/journal.go | 2 +- core/state/snapshot/snapshot.go | 7 +++++++ 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index c3a4a552ff..cf9b2b0393 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -54,9 +54,11 @@ type generatorStats struct { // Log creates an contextual log with the given message and the context pulled // from the internally maintained statistics. -func (gs *generatorStats) Log(msg string, marker []byte) { +func (gs *generatorStats) Log(msg string, root common.Hash, marker []byte) { var ctx []interface{} - + if root != (common.Hash{}) { + ctx = append(ctx, []interface{}{"root", root}...) + } // Figure out whether we're after or within an account switch len(marker) { case common.HashLength: @@ -120,7 +122,7 @@ func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache i func (dl *diskLayer) generate(stats *generatorStats) { // If a database wipe is in operation, wait until it's done if stats.wiping != nil { - stats.Log("Wiper running, state snapshotting paused", dl.genMarker) + stats.Log("Wiper running, state snapshotting paused", common.Hash{}, dl.genMarker) select { // If wiper is done, resume normal mode of operation case <-stats.wiping: @@ -137,13 +139,13 @@ func (dl *diskLayer) generate(stats *generatorStats) { accTrie, err := trie.NewSecure(dl.root, dl.triedb) if err != nil { // The account trie is missing (GC), surf the chain until one becomes available - stats.Log("Trie missing, state snapshotting paused", dl.genMarker) + stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker) abort := <-dl.genAbort abort <- stats return } - stats.Log("Resuming state snapshot generation", dl.genMarker) + stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker) var accMarker []byte if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that @@ -192,7 +194,7 @@ func (dl *diskLayer) generate(stats *generatorStats) { dl.lock.Unlock() } if abort != nil { - stats.Log("Aborting state snapshot generation", accountHash[:]) + stats.Log("Aborting state snapshot generation", dl.root, accountHash[:]) abort <- stats return } @@ -230,7 +232,7 @@ func (dl *diskLayer) generate(stats *generatorStats) { dl.lock.Unlock() } if abort != nil { - stats.Log("Aborting state snapshot generation", append(accountHash[:], storeIt.Key...)) + stats.Log("Aborting state snapshot generation", dl.root, 
append(accountHash[:], storeIt.Key...)) abort <- stats return } @@ -238,7 +240,7 @@ func (dl *diskLayer) generate(stats *generatorStats) { } } if time.Since(logged) > 8*time.Second { - stats.Log("Generating state snapshot", accIt.Key) + stats.Log("Generating state snapshot", dl.root, accIt.Key) logged = time.Now() } // Some account processed, unmark the marker diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go index 0e73454168..fc1053f818 100644 --- a/core/state/snapshot/journal.go +++ b/core/state/snapshot/journal.go @@ -193,7 +193,7 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) { dl.genAbort <- abort if stats = <-abort; stats != nil { - stats.Log("Journalling in-progress snapshot", dl.genMarker) + stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker) } } // Ensure the layer didn't get stale diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index 8ea56d7314..f6c5a6a9a8 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -263,6 +263,13 @@ func (t *Tree) Cap(root common.Hash, layers int) error { if !ok { return fmt.Errorf("snapshot [%#x] is disk layer", root) } + // If the generator is still running, use a more aggressive cap + diff.origin.lock.RLock() + if diff.origin.genMarker != nil && layers > 8 { + layers = 8 + } + diff.origin.lock.RUnlock() + // Run the internal capping and discard all stale layers t.lock.Lock() defer t.lock.Unlock() From 7b5107b73ff9600c6856390cf28f98be25835e80 Mon Sep 17 00:00:00 2001 From: timcooijmans Date: Mon, 24 Aug 2020 14:42:39 +0200 Subject: [PATCH 015/105] p2p/discover: avoid dropping unverified nodes when table is almost empty (#21396) This change improves discovery behavior in small networks. Very small networks would often fail to bootstrap because all member nodes were dropping table content due to findnode failure. The check is now changed to avoid dropping nodes on findnode failure when their bucket is almost empty. It also relaxes the liveness check requirement for FINDNODE/v4 response nodes, returning unverified nodes as results when there aren't any verified nodes yet. The "findnode failed" log now reports whether the node was dropped instead of the number of results. The value of the "results" was always zero by definition. Co-authored-by: Felix Lange --- p2p/discover/lookup.go | 13 +++--- p2p/discover/table.go | 43 +++++++++++++----- p2p/discover/table_test.go | 4 +- p2p/discover/v4_udp.go | 17 ++++--- p2p/discover/v4_udp_test.go | 88 ++++++++++++++++++++++++++++++++++++- 5 files changed, 140 insertions(+), 25 deletions(-) diff --git a/p2p/discover/lookup.go b/p2p/discover/lookup.go index 40b271e6d9..9ab4a71ce7 100644 --- a/p2p/discover/lookup.go +++ b/p2p/discover/lookup.go @@ -104,9 +104,7 @@ func (it *lookup) startQueries() bool { // The first query returns nodes from the local table. if it.queries == -1 { - it.tab.mutex.Lock() - closest := it.tab.closest(it.result.target, bucketSize, false) - it.tab.mutex.Unlock() + closest := it.tab.findnodeByID(it.result.target, bucketSize, false) // Avoid finishing the lookup too quickly if table is empty. It'd be better to wait // for the table to fill in this case, but there is no good mechanism for that // yet. 
@@ -150,11 +148,14 @@ func (it *lookup) query(n *node, reply chan<- []*node) { } else if len(r) == 0 { fails++ it.tab.db.UpdateFindFails(n.ID(), n.IP(), fails) - it.tab.log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "results", len(r), "err", err) - if fails >= maxFindnodeFailures { - it.tab.log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails) + // Remove the node from the local table if it fails to return anything useful too + // many times, but only if there are enough other nodes in the bucket. + dropped := false + if fails >= maxFindnodeFailures && it.tab.bucketLen(n.ID()) >= bucketSize/2 { + dropped = true it.tab.delete(n) } + it.tab.log.Trace("FINDNODE failed", "id", n.ID(), "failcount", fails, "dropped", dropped, "err", err) } else if fails > 0 { // Reset failure counter because it counts _consecutive_ failures. it.tab.db.UpdateFindFails(n.ID(), n.IP(), 0) diff --git a/p2p/discover/table.go b/p2p/discover/table.go index 010fa47f52..56ab9368a5 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -392,22 +392,35 @@ func (tab *Table) copyLiveNodes() { } } -// closest returns the n nodes in the table that are closest to the -// given id. The caller must hold tab.mutex. -func (tab *Table) closest(target enode.ID, nresults int, checklive bool) *nodesByDistance { - // This is a very wasteful way to find the closest nodes but - // obviously correct. I believe that tree-based buckets would make - // this easier to implement efficiently. - close := &nodesByDistance{target: target} +// findnodeByID returns the n nodes in the table that are closest to the given id. +// This is used by the FINDNODE/v4 handler. +// +// The preferLive parameter says whether the caller wants liveness-checked results. If +// preferLive is true and the table contains any verified nodes, the result will not +// contain unverified nodes. However, if there are no verified nodes at all, the result +// will contain unverified nodes. +func (tab *Table) findnodeByID(target enode.ID, nresults int, preferLive bool) *nodesByDistance { + tab.mutex.Lock() + defer tab.mutex.Unlock() + + // Scan all buckets. There might be a better way to do this, but there aren't that many + // buckets, so this solution should be fine. The worst-case complexity of this loop + // is O(tab.len() * nresults). + nodes := &nodesByDistance{target: target} + liveNodes := &nodesByDistance{target: target} for _, b := range &tab.buckets { for _, n := range b.entries { - if checklive && n.livenessChecks == 0 { - continue + nodes.push(n, nresults) + if preferLive && n.livenessChecks > 0 { + liveNodes.push(n, nresults) } - close.push(n, nresults) } } - return close + + if preferLive && len(liveNodes.entries) > 0 { + return liveNodes + } + return nodes } // len returns the number of nodes in the table. @@ -421,6 +434,14 @@ func (tab *Table) len() (n int) { return n } +// bucketLen returns the number of nodes in the bucket for the given ID. +func (tab *Table) bucketLen(id enode.ID) int { + tab.mutex.Lock() + defer tab.mutex.Unlock() + + return len(tab.bucket(id).entries) +} + // bucket returns the bucket for the given node ID hash. 
func (tab *Table) bucket(id enode.ID) *bucket { d := enode.LogDist(tab.self().ID(), id) diff --git a/p2p/discover/table_test.go b/p2p/discover/table_test.go index 562691e5b9..5f40c967fd 100644 --- a/p2p/discover/table_test.go +++ b/p2p/discover/table_test.go @@ -190,7 +190,7 @@ func checkIPLimitInvariant(t *testing.T, tab *Table) { } } -func TestTable_closest(t *testing.T) { +func TestTable_findnodeByID(t *testing.T) { t.Parallel() test := func(test *closeTest) bool { @@ -202,7 +202,7 @@ func TestTable_closest(t *testing.T) { fillTable(tab, test.All) // check that closest(Target, N) returns nodes - result := tab.closest(test.Target, test.N, false).entries + result := tab.findnodeByID(test.Target, test.N, false).entries if hasDuplicates(result) { t.Errorf("result contains duplicates") return false diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index e5b6939a48..ad23eee6b4 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -324,7 +324,16 @@ func (t *UDPv4) findnode(toid enode.ID, toaddr *net.UDPAddr, target v4wire.Pubke Target: target, Expiration: uint64(time.Now().Add(expiration).Unix()), }) - return nodes, <-rm.errc + // Ensure that callers don't see a timeout if the node actually responded. Since + // findnode can receive more than one neighbors response, the reply matcher will be + // active until the remote node sends enough nodes. If the remote end doesn't have + // enough nodes the reply matcher will time out waiting for the second reply, but + // there's no need for an error in that case. + err := <-rm.errc + if err == errTimeout && rm.reply != nil { + err = nil + } + return nodes, err } // RequestENR sends enrRequest to the given node and waits for a response. @@ -453,9 +462,9 @@ func (t *UDPv4) loop() { if p.from == r.from && p.ptype == r.data.Kind() && p.ip.Equal(r.ip) { ok, requestDone := p.callback(r.data) matched = matched || ok + p.reply = r.data // Remove the matcher if callback indicates that all replies have been received. if requestDone { - p.reply = r.data p.errc <- nil plist.Remove(el) } @@ -715,9 +724,7 @@ func (t *UDPv4) handleFindnode(h *packetHandlerV4, from *net.UDPAddr, fromID eno // Determine closest nodes. target := enode.ID(crypto.Keccak256Hash(req.Target[:])) - t.tab.mutex.Lock() - closest := t.tab.closest(target, bucketSize, true).entries - t.tab.mutex.Unlock() + closest := t.tab.findnodeByID(target, bucketSize, true).entries // Send neighbors in chunks with at most maxNeighbors per packet // to stay below the packet size limit. diff --git a/p2p/discover/v4_udp_test.go b/p2p/discover/v4_udp_test.go index 2b0a65736c..262e3f0ba3 100644 --- a/p2p/discover/v4_udp_test.go +++ b/p2p/discover/v4_udp_test.go @@ -22,6 +22,7 @@ import ( crand "crypto/rand" "encoding/binary" "errors" + "fmt" "io" "math/rand" "net" @@ -277,7 +278,7 @@ func TestUDPv4_findnode(t *testing.T) { test.table.db.UpdateLastPongReceived(remoteID, test.remoteaddr.IP, time.Now()) // check that closest neighbors are returned. - expected := test.table.closest(testTarget.ID(), bucketSize, true) + expected := test.table.findnodeByID(testTarget.ID(), bucketSize, true) test.packetIn(nil, &v4wire.Findnode{Target: testTarget, Expiration: futureExp}) waitNeighbors := func(want []*node) { test.waitPacketOut(func(p *v4wire.Neighbors, to *net.UDPAddr, hash []byte) { @@ -493,6 +494,91 @@ func TestUDPv4_EIP868(t *testing.T) { }) } +// This test verifies that a small network of nodes can boot up into a healthy state. 
+func TestUDPv4_smallNetConvergence(t *testing.T) { + t.Parallel() + + // Start the network. + nodes := make([]*UDPv4, 4) + for i := range nodes { + var cfg Config + if i > 0 { + bn := nodes[0].Self() + cfg.Bootnodes = []*enode.Node{bn} + } + nodes[i] = startLocalhostV4(t, cfg) + defer nodes[i].Close() + } + + // Run through the iterator on all nodes until + // they have all found each other. + status := make(chan error, len(nodes)) + for i := range nodes { + node := nodes[i] + go func() { + found := make(map[enode.ID]bool, len(nodes)) + it := node.RandomNodes() + for it.Next() { + found[it.Node().ID()] = true + if len(found) == len(nodes) { + status <- nil + return + } + } + status <- fmt.Errorf("node %s didn't find all nodes", node.Self().ID().TerminalString()) + }() + } + + // Wait for all status reports. + timeout := time.NewTimer(30 * time.Second) + defer timeout.Stop() + for received := 0; received < len(nodes); { + select { + case <-timeout.C: + for _, node := range nodes { + node.Close() + } + case err := <-status: + received++ + if err != nil { + t.Error("ERROR:", err) + return + } + } + } +} + +func startLocalhostV4(t *testing.T, cfg Config) *UDPv4 { + t.Helper() + + cfg.PrivateKey = newkey() + db, _ := enode.OpenDB("") + ln := enode.NewLocalNode(db, cfg.PrivateKey) + + // Prefix logs with node ID. + lprefix := fmt.Sprintf("(%s)", ln.ID().TerminalString()) + lfmt := log.TerminalFormat(false) + cfg.Log = testlog.Logger(t, log.LvlTrace) + cfg.Log.SetHandler(log.FuncHandler(func(r *log.Record) error { + t.Logf("%s %s", lprefix, lfmt.Format(r)) + return nil + })) + + // Listen. + socket, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IP{127, 0, 0, 1}}) + if err != nil { + t.Fatal(err) + } + realaddr := socket.LocalAddr().(*net.UDPAddr) + ln.SetStaticIP(realaddr.IP) + ln.SetFallbackUDP(realaddr.Port) + udp, err := ListenV4(socket, ln, cfg) + if err != nil { + t.Fatal(err) + } + return udp +} + // dgramPipe is a fake UDP socket. It queues all sent datagrams. 
type dgramPipe struct { mu *sync.Mutex From 5655dce3b85ae5064e94f0957a62e593095ae87b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 25 Aug 2020 08:45:41 +0300 Subject: [PATCH 016/105] core/rawdb: only complain loudly if truncating many items --- core/rawdb/freezer_table.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index e11a27430f..b9d8a274a8 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -330,7 +330,8 @@ func (t *freezerTable) truncate(items uint64) error { defer t.lock.Unlock() // If our item count is correct, don't do anything - if atomic.LoadUint64(&t.items) <= items { + existing := atomic.LoadUint64(&t.items) + if existing <= items { return nil } // We need to truncate, save the old size for metrics tracking @@ -339,7 +340,11 @@ func (t *freezerTable) truncate(items uint64) error { return err } // Something's out of sync, truncate the table's offset index - t.logger.Warn("Truncating freezer table", "items", t.items, "limit", items) + log := t.logger.Debug + if existing > items+1 { + log = t.logger.Warn // Only loud warn if we delete multiple items + } + log("Truncating freezer table", "items", existing, "limit", items) if err := truncateFreezerFile(t.index, int64(items+1)*indexEntrySize); err != nil { return err } From 341f4510830f169653f47a3428037c612fdd0e36 Mon Sep 17 00:00:00 2001 From: Shude Li Date: Tue, 25 Aug 2020 16:38:56 +0800 Subject: [PATCH 017/105] graphql: add support for retrieving the chain id (#21451) --- graphql/graphql.go | 4 ++++ graphql/schema.go | 2 ++ 2 files changed, 6 insertions(+) diff --git a/graphql/graphql.go b/graphql/graphql.go index 1479ae7fdb..559da8aaaa 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -1044,6 +1044,10 @@ func (r *Resolver) ProtocolVersion(ctx context.Context) (int32, error) { return int32(r.backend.ProtocolVersion()), nil } +func (r *Resolver) ChainID(ctx context.Context) (hexutil.Big, error) { + return hexutil.Big(*r.backend.ChainConfig().ChainID), nil +} + // SyncState represents the synchronisation status returned from the `syncing` accessor. type SyncState struct { progress ethereum.SyncProgress diff --git a/graphql/schema.go b/graphql/schema.go index 5dec10db20..d7b253f227 100644 --- a/graphql/schema.go +++ b/graphql/schema.go @@ -314,6 +314,8 @@ const schema string = ` protocolVersion: Int! # Syncing returns information on the current synchronisation state. syncing: SyncState + # ChainID returns the current chain ID for transaction replay protection. + chainID: BigInt! } type Mutation { From ce5f94920d921a13f3f0f7a3bffa9cfdd6f22016 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 25 Aug 2020 13:02:51 +0300 Subject: [PATCH 018/105] params: update CHTs for v1.9.20 release --- params/config.go | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/params/config.go b/params/config.go index 964e8c1f9e..c935405e7f 100644 --- a/params/config.go +++ b/params/config.go @@ -73,10 +73,10 @@ var ( // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. 
MainnetTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 323, - SectionHead: common.HexToHash("0x2cab24d8502fb936799e4775c43b66eaec2981c9458f4676129b38bbd535a061"), - CHTRoot: common.HexToHash("0x30e9008a3e038a0e6b6d93cfc1726bdfdc40590a75e6dbe4feeafee2d7281ae6"), - BloomRoot: common.HexToHash("0x0fa8b4a19b77a454d1994864520bb8b427c829ac76967956c4ddddefe0407bf1"), + SectionIndex: 326, + SectionHead: common.HexToHash("0xbdec9f7056159360d64d6488ee11a0db574a67757cddd6fffd6719121d5733a5"), + CHTRoot: common.HexToHash("0xf9d2617f8e038b824a256025f01af3b3da681987df29dbfe718ad4c6c8a0875d"), + BloomRoot: common.HexToHash("0x712016984cfb66c165fdaf05c6a4aa89f08e4bb66fa77b199f2878fff4232d78"), } // MainnetCheckpointOracle contains a set of configs for the main network oracle. @@ -112,10 +112,10 @@ var ( // RopstenTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network. RopstenTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 257, - SectionHead: common.HexToHash("0x2193034371f50352e412a763b14d53ffafbb5b9c12d1835516fb04f5a3498a9c"), - CHTRoot: common.HexToHash("0x9a4601d7893e469f4987a3ac9450b4953ca7302689770d1840542a3fe0a8c7c1"), - BloomRoot: common.HexToHash("0x198d72f8a47694682367981ae8d9988d6b30c4f433425399726dc53357a79e6f"), + SectionIndex: 260, + SectionHead: common.HexToHash("0xdcf714d033b8be3f0786515649d76e526157f811e5ae89c59dbfd53029d0d165"), + CHTRoot: common.HexToHash("0x987759454d404cd393a6a7743da64610076f167e989ec2cf9e0c0be6578d1304"), + BloomRoot: common.HexToHash("0xb8ee6d34cc30d61410717e2dc1af3294bc056f4b32a5eed5f6f386a8c1daa2b1"), } // RopstenCheckpointOracle contains a set of configs for the Ropsten test network oracle. @@ -154,10 +154,10 @@ var ( // RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network. RinkebyTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 212, - SectionHead: common.HexToHash("0x47494484fe9696dfc7a351575b6b333566c79d2fad2a1f586165f58f3c2a553b"), - CHTRoot: common.HexToHash("0x6ec314ba06ce6e46c1be675dabbabc6fae464d394253e1647ba73480eb46d11d"), - BloomRoot: common.HexToHash("0x4ad93f0ddbe55baae629853971c6fd0de201ddef9f04892c05a1258fbacc88ca"), + SectionIndex: 214, + SectionHead: common.HexToHash("0x297b4daf21db636e76555c9d3e302d79a8efe3a3434143b9bcf61187ce8abcb1"), + CHTRoot: common.HexToHash("0x602044234a4ba8534286240200cde6e5797ae40151cbdd2dbf8eb8c0486a2c63"), + BloomRoot: common.HexToHash("0x9ccf6840ecc541b290c7b9f19edcba3e5f39206b05cd4ae5a7754040783d47d9"), } // RinkebyCheckpointOracle contains a set of configs for the Rinkeby test network oracle. @@ -194,10 +194,10 @@ var ( // GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network. GoerliTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 96, - SectionHead: common.HexToHash("0xa038699854f0aa1624da55646e0219df4e319738319e65c38a270edcb7819a2a"), - CHTRoot: common.HexToHash("0x388fd4c8b58b76b76c2575211f5a6b83bce2be7ce628a750f67e4853680fa76a"), - BloomRoot: common.HexToHash("0xa8a61388f1240ea1d32413be1bdb056352c13e59278b6b31467bca77fb903fbc"), + SectionIndex: 99, + SectionHead: common.HexToHash("0xc9f09369acd657d5f77e6a389a68f673bf909ad98c269800c08229d75c1a90e3"), + CHTRoot: common.HexToHash("0x523218630348e98fa9f4e7fc3054aff717982d79c700cbecf5730c1479f21c6e"), + BloomRoot: common.HexToHash("0x75219ad4a3ec4682b89dd248ee56b52ef26fe577a426f4813297550deb5c4cb2"), } // GoerliCheckpointOracle contains a set of configs for the Goerli test network oracle. 
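For orientation, the SectionIndex values bumped above can be mapped to block heights: each trusted checkpoint vouches for all headers up to the end of its CHT section, and SectionHead is the hash of that last block. A minimal standalone sketch follows, assuming the usual section size of 32768 blocks; the chtSectionSize constant and lastCoveredBlock helper are illustrative names, not geth API.

package main

import "fmt"

// chtSectionSize is an assumed value: the number of blocks per CHT section
// that trusted checkpoints are generated for.
const chtSectionSize = 32768

// lastCoveredBlock returns the number of the last block covered by a
// checkpoint with the given section index (its hash is the SectionHead).
func lastCoveredBlock(sectionIndex uint64) uint64 {
	return (sectionIndex+1)*chtSectionSize - 1
}

func main() {
	// Under this assumption, the new mainnet checkpoint (SectionIndex 326)
	// covers headers up to block 10,715,135.
	fmt.Println(lastCoveredBlock(326))
}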
From 979fc96899c77876e15807005eadd936da17b6c2 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 25 Aug 2020 16:20:37 +0200 Subject: [PATCH 019/105] params: release Geth v1.9.20 --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index b58e32649d..5102f2fb7b 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 9 // Minor version component of the current release - VersionPatch = 20 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 9 // Minor version component of the current release + VersionPatch = 20 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. From fc20680b95da65f952012f3370e5d316f0ba237d Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 25 Aug 2020 16:21:41 +0200 Subject: [PATCH 020/105] params: begin v1.9.21 release cycle --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 5102f2fb7b..f827bcd62e 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 9 // Minor version component of the current release - VersionPatch = 20 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 9 // Minor version component of the current release + VersionPatch = 21 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. From 92b12ee6c6ebf32507a2834b2913bd557a416209 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Wed, 26 Aug 2020 09:37:00 +0200 Subject: [PATCH 021/105] accounts/abi/bind/backends: Disallow AdjustTime for non-empty blocks (#21334) * accounts/abi/bind/backends: Disallow timeshift for non-empty blocks * accounts/abi/bind/backends: added tests for adjust time * accounts/abi/bind/simulated: added comments, fixed test for AdjustTime * accounts/abi/bind/backends: updated comment --- accounts/abi/bind/backends/simulated.go | 8 ++-- accounts/abi/bind/backends/simulated_test.go | 41 +++++++++++++++++++- 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 4b9372a201..973d95531d 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -675,14 +675,16 @@ func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *type } // AdjustTime adds a time shift to the simulated clock. +// It can only be called on empty blocks. 
func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error { b.mu.Lock() defer b.mu.Unlock() + if len(b.pendingBlock.Transactions()) != 0 { + return errors.New("Could not adjust time on non-empty block") + } + blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { - for _, tx := range b.pendingBlock.Transactions() { - block.AddTx(tx) - } block.OffsetTime(int64(adjustment.Seconds())) }) statedb, _ := b.blockchain.State() diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index d14a88e8bb..9087d74bc6 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -143,8 +143,7 @@ func TestSimulatedBackend_AdjustTime(t *testing.T) { defer sim.Close() prevTime := sim.pendingBlock.Time() - err := sim.AdjustTime(time.Second) - if err != nil { + if err := sim.AdjustTime(time.Second); err != nil { t.Error(err) } newTime := sim.pendingBlock.Time() @@ -154,6 +153,44 @@ func TestSimulatedBackend_AdjustTime(t *testing.T) { } } +func TestNewSimulatedBackend_AdjustTimeFail(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + // Create tx and send + tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + sim.SendTransaction(context.Background(), signedTx) + // AdjustTime should fail on non-empty block + if err := sim.AdjustTime(time.Second); err == nil { + t.Error("Expected adjust time to error on non-empty block") + } + sim.Commit() + + prevTime := sim.pendingBlock.Time() + if err := sim.AdjustTime(time.Minute); err != nil { + t.Error(err) + } + newTime := sim.pendingBlock.Time() + if newTime-prevTime != uint64(time.Minute.Seconds()) { + t.Errorf("adjusted time not equal to a minute. 
prev: %v, new: %v", prevTime, newTime) + } + // Put a transaction after adjusting time + tx2 := types.NewTransaction(1, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + signedTx2, err := types.SignTx(tx2, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + sim.SendTransaction(context.Background(), signedTx2) + sim.Commit() + newTime = sim.pendingBlock.Time() + if newTime-prevTime >= uint64(time.Minute.Seconds()) { + t.Errorf("time adjusted, but shouldn't be: prev: %v, new: %v", prevTime, newTime) + } +} + func TestSimulatedBackend_BalanceAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) expectedBal := big.NewInt(10000000000) From d8da0b3d81d6623e0e500de11f50c2858e1fb9e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 26 Aug 2020 13:05:06 +0300 Subject: [PATCH 022/105] core/state, eth, trie: stabilize memory use, fix memory leak --- core/state/statedb.go | 2 +- core/state/sync.go | 6 ++-- eth/downloader/downloader.go | 11 +++++-- trie/committer.go | 4 +-- trie/sync.go | 60 +++++++++++++++++++++++++----------- trie/trie.go | 2 +- trie/trie_test.go | 2 +- 7 files changed, 58 insertions(+), 29 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index cd020e6543..36f7d863af 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -847,7 +847,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { // The onleaf func is called _serially_, so we can reuse the same account // for unmarshalling every time. var account Account - root, err := s.trie.Commit(func(leaf []byte, parent common.Hash) error { + root, err := s.trie.Commit(func(path []byte, leaf []byte, parent common.Hash) error { if err := rlp.DecodeBytes(leaf, &account); err != nil { return nil } diff --git a/core/state/sync.go b/core/state/sync.go index 052cfad7bb..1018b78e5e 100644 --- a/core/state/sync.go +++ b/core/state/sync.go @@ -28,13 +28,13 @@ import ( // NewStateSync create a new state trie download scheduler. func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom) *trie.Sync { var syncer *trie.Sync - callback := func(leaf []byte, parent common.Hash) error { + callback := func(path []byte, leaf []byte, parent common.Hash) error { var obj Account if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil { return err } - syncer.AddSubTrie(obj.Root, 64, parent, nil) - syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), 64, parent) + syncer.AddSubTrie(obj.Root, path, parent, nil) + syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), path, parent) return nil } syncer = trie.NewSync(root, database, callback, bloom) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 4c5b270b7c..f5bdb3c234 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -1611,7 +1611,13 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error { // Start syncing state of the reported head block. This should get us most of // the state of the pivot block. sync := d.syncState(latest.Root) - defer sync.Cancel() + defer func() { + // The `sync` object is replaced every time the pivot moves. We need to + // defer close the very last active one, hence the lazy evaluation vs. + // calling defer sync.Cancel() !!! 
+ sync.Cancel() + }() + closeOnErr := func(s *stateSync) { if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled { d.queue.Close() // wake up Results @@ -1674,9 +1680,8 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error { // If new pivot block found, cancel old state retrieval and restart if oldPivot != P { sync.Cancel() - sync = d.syncState(P.Header.Root) - defer sync.Cancel() + go closeOnErr(sync) oldPivot = P } diff --git a/trie/committer.go b/trie/committer.go index 2f3d2a4633..fc8b7ceda5 100644 --- a/trie/committer.go +++ b/trie/committer.go @@ -226,12 +226,12 @@ func (c *committer) commitLoop(db *Database) { switch n := n.(type) { case *shortNode: if child, ok := n.Val.(valueNode); ok { - c.onleaf(child, hash) + c.onleaf(nil, child, hash) } case *fullNode: for i := 0; i < 16; i++ { if child, ok := n.Children[i].(valueNode); ok { - c.onleaf(child, hash) + c.onleaf(nil, child, hash) } } } diff --git a/trie/sync.go b/trie/sync.go index af99466416..147307fe71 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -34,14 +34,19 @@ var ErrNotRequested = errors.New("not requested") // node it already processed previously. var ErrAlreadyProcessed = errors.New("already processed") +// maxFetchesPerDepth is the maximum number of pending trie nodes per depth. The +// role of this value is to limit the number of trie nodes that get expanded in +// memory if the node was configured with a significant number of peers. +const maxFetchesPerDepth = 16384 + // request represents a scheduled or already in-flight state retrieval request. type request struct { + path []byte // Merkle path leading to this node for prioritization hash common.Hash // Hash of the node data content to retrieve data []byte // Data content of the node, cached until all subtrees complete code bool // Whether this is a code entry parents []*request // Parent state nodes referencing this entry (notify all upon completion) - depth int // Depth level within the trie the node is located to prioritise DFS deps int // Number of dependencies before allowed to commit this node callback LeafCallback // Callback to invoke if a leaf node it reached on this branch @@ -89,6 +94,7 @@ type Sync struct { nodeReqs map[common.Hash]*request // Pending requests pertaining to a trie node hash codeReqs map[common.Hash]*request // Pending requests pertaining to a code hash queue *prque.Prque // Priority queue with the pending requests + fetches map[int]int // Number of active fetches per trie node depth bloom *SyncBloom // Bloom filter for fast state existence checks } @@ -100,14 +106,15 @@ func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallb nodeReqs: make(map[common.Hash]*request), codeReqs: make(map[common.Hash]*request), queue: prque.New(nil), + fetches: make(map[int]int), bloom: bloom, } - ts.AddSubTrie(root, 0, common.Hash{}, callback) + ts.AddSubTrie(root, nil, common.Hash{}, callback) return ts } // AddSubTrie registers a new trie to the sync code, rooted at the designated parent. 
-func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callback LeafCallback) { +func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, callback LeafCallback) { // Short circuit if the trie is empty or already known if root == emptyRoot { return @@ -128,8 +135,8 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb } // Assemble the new sub-trie sync request req := &request{ + path: path, hash: root, - depth: depth, callback: callback, } // If this sub-trie has a designated parent, link them together @@ -147,7 +154,7 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb // AddCodeEntry schedules the direct retrieval of a contract code that should not // be interpreted as a trie node, but rather accepted and stored into the database // as is. -func (s *Sync) AddCodeEntry(hash common.Hash, depth int, parent common.Hash) { +func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) { // Short circuit if the entry is empty or already known if hash == emptyState { return @@ -170,9 +177,9 @@ func (s *Sync) AddCodeEntry(hash common.Hash, depth int, parent common.Hash) { } // Assemble the new sub-trie sync request req := &request{ - hash: hash, - code: true, - depth: depth, + path: path, + hash: hash, + code: true, } // If this sub-trie has a designated parent, link them together if parent != (common.Hash{}) { @@ -190,7 +197,18 @@ func (s *Sync) AddCodeEntry(hash common.Hash, depth int, parent common.Hash) { func (s *Sync) Missing(max int) []common.Hash { var requests []common.Hash for !s.queue.Empty() && (max == 0 || len(requests) < max) { - requests = append(requests, s.queue.PopItem().(common.Hash)) + // Retrieve th enext item in line + item, prio := s.queue.Peek() + + // If we have too many already-pending tasks for this depth, throttle + depth := int(prio >> 56) + if s.fetches[depth] > maxFetchesPerDepth { + break + } + // Item is allowed to be scheduled, add it to the task list + s.queue.Pop() + s.fetches[depth]++ + requests = append(requests, item.(common.Hash)) } return requests } @@ -285,7 +303,11 @@ func (s *Sync) schedule(req *request) { // is a trie node and code has same hash. In this case two elements // with same hash and same or different depth will be pushed. But it's // ok the worst case is the second response will be treated as duplicated. 
- s.queue.Push(req.hash, int64(req.depth)) + prio := int64(len(req.path)) << 56 // depth >= 128 will never happen, storage leaves will be included in their parents + for i := 0; i < 14 && i < len(req.path); i++ { + prio |= int64(15-req.path[i]) << (52 - i*4) // 15-nibble => lexicographic order + } + s.queue.Push(req.hash, prio) } // children retrieves all the missing children of a state trie entry for future @@ -293,23 +315,23 @@ func (s *Sync) schedule(req *request) { func (s *Sync) children(req *request, object node) ([]*request, error) { // Gather all the children of the node, irrelevant whether known or not type child struct { - node node - depth int + path []byte + node node } var children []child switch node := (object).(type) { case *shortNode: children = []child{{ - node: node.Val, - depth: req.depth + len(node.Key), + node: node.Val, + path: append(append([]byte(nil), req.path...), node.Key...), }} case *fullNode: for i := 0; i < 17; i++ { if node.Children[i] != nil { children = append(children, child{ - node: node.Children[i], - depth: req.depth + 1, + node: node.Children[i], + path: append(append([]byte(nil), req.path...), byte(i)), }) } } @@ -322,7 +344,7 @@ func (s *Sync) children(req *request, object node) ([]*request, error) { // Notify any external watcher of a new key/value node if req.callback != nil { if node, ok := (child.node).(valueNode); ok { - if err := req.callback(node, req.hash); err != nil { + if err := req.callback(req.path, node, req.hash); err != nil { return nil, err } } @@ -346,9 +368,9 @@ func (s *Sync) children(req *request, object node) ([]*request, error) { } // Locally unknown node, schedule for retrieval requests = append(requests, &request{ + path: child.path, hash: hash, parents: []*request{req}, - depth: child.depth, callback: req.callback, }) } @@ -364,9 +386,11 @@ func (s *Sync) commit(req *request) (err error) { if req.code { s.membatch.codes[req.hash] = req.data delete(s.codeReqs, req.hash) + s.fetches[len(req.path)]-- } else { s.membatch.nodes[req.hash] = req.data delete(s.nodeReqs, req.hash) + s.fetches[len(req.path)]-- } // Check all parents for completion for _, parent := range req.parents { diff --git a/trie/trie.go b/trie/trie.go index 26c3f2c29b..7ccd37f872 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -38,7 +38,7 @@ var ( // LeafCallback is a callback type invoked when a trie operation reaches a leaf // node. It's used by state sync and commit to allow handling external references // between account and storage tries. -type LeafCallback func(leaf []byte, parent common.Hash) error +type LeafCallback func(path []byte, leaf []byte, parent common.Hash) error // Trie is a Merkle Patricia Trie. // The zero value is an empty trie with no database. 
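The priority encoding introduced above is compact but dense, so here is a standalone sketch of what Sync.schedule builds (the priority function below is an illustration, not code from the patch): the path length occupies the top byte, which is why Missing can recover the depth with prio >> 56 for per-depth throttling, and up to 14 leading nibbles are packed below it as 15-nibble, so at equal depth the lexicographically smallest path receives the largest value. Since the queue pops the largest priority first, retrieval proceeds roughly depth-first, left to right.

package main

import "fmt"

// priority mirrors the encoding used in Sync.schedule, for illustration only.
func priority(path []byte) int64 {
	prio := int64(len(path)) << 56 // depth >= 128 never happens, so the top byte is safe
	for i := 0; i < 14 && i < len(path); i++ {
		prio |= int64(15-path[i]) << (52 - i*4) // 15-nibble => lexicographic order
	}
	return prio
}

func main() {
	a := priority([]byte{0x0, 0x1}) // nibble path "01"
	b := priority([]byte{0x0, 0x2}) // nibble path "02", lexicographically later
	c := priority([]byte{0x0})      // nibble path "0", one level shallower
	fmt.Println(a > b, a > c, a>>56) // true true 2
}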
diff --git a/trie/trie_test.go b/trie/trie_test.go index 588562146a..2356b7a746 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -565,7 +565,7 @@ func BenchmarkCommitAfterHash(b *testing.B) { benchmarkCommitAfterHash(b, nil) }) var a account - onleaf := func(leaf []byte, parent common.Hash) error { + onleaf := func(path []byte, leaf []byte, parent common.Hash) error { rlp.DecodeBytes(leaf, &a) return nil } From 16d7eae1c8390576057a859ea4330fba895dca40 Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Wed, 26 Aug 2020 12:20:12 +0200 Subject: [PATCH 023/105] eth: updated comments (#21490) --- eth/api.go | 2 +- eth/api_tracer.go | 2 +- eth/bloombits.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/eth/api.go b/eth/api.go index b3415f923c..d65a8efa0d 100644 --- a/eth/api.go +++ b/eth/api.go @@ -354,7 +354,7 @@ func (api *PrivateDebugAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, // AccountRangeMaxResults is the maximum number of results to be returned per call const AccountRangeMaxResults = 256 -// AccountRangeAt enumerates all accounts in the given block and start point in paging request +// AccountRange enumerates all accounts in the given block and start point in paging request func (api *PublicDebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start []byte, maxResults int, nocode, nostorage, incompletes bool) (state.IteratorDump, error) { var stateDb *state.StateDB var err error diff --git a/eth/api_tracer.go b/eth/api_tracer.go index c5a8ada109..51c2408c24 100644 --- a/eth/api_tracer.go +++ b/eth/api_tracer.go @@ -401,7 +401,7 @@ func (api *PrivateDebugAPI) TraceBlockFromFile(ctx context.Context, file string, return api.TraceBlock(ctx, blob, config) } -// TraceBadBlockByHash returns the structured logs created during the execution of +// TraceBadBlock returns the structured logs created during the execution of // EVM against a block pulled from the pool of bad ones and returns them as a JSON // object. func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) { diff --git a/eth/bloombits.go b/eth/bloombits.go index f8b77f9cff..bd34bd7b69 100644 --- a/eth/bloombits.go +++ b/eth/bloombits.go @@ -137,7 +137,7 @@ func (b *BloomIndexer) Commit() error { return batch.Write() } -// PruneSections returns an empty error since we don't support pruning here. +// Prune returns an empty error since we don't support pruning here. 
func (b *BloomIndexer) Prune(threshold uint64) error { return nil } From 856307d8bbbe20e6d45f4a2a017233a5543b30f7 Mon Sep 17 00:00:00 2001 From: ucwong Date: Wed, 26 Aug 2020 21:53:12 +0800 Subject: [PATCH 024/105] go.mod | goleveldb latest update (#21448) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * go.mod | goleveldb latest update * go.mod update * leveldb options * go.mod: double check Co-authored-by: Péter Szilágyi --- go.mod | 11 +++++------ go.sum | 60 +++++++++++++++++++++++++++++++++++++++++++++++----------- 2 files changed, 54 insertions(+), 17 deletions(-) mode change 100644 => 100755 go.mod mode change 100644 => 100755 go.sum diff --git a/go.mod b/go.mod old mode 100644 new mode 100755 index 2299eb5017..0b22fe6389 --- a/go.mod +++ b/go.mod @@ -25,9 +25,8 @@ require ( github.com/go-ole/go-ole v1.2.1 // indirect github.com/go-sourcemap/sourcemap v2.1.2+incompatible // indirect github.com/go-stack/stack v1.8.0 - github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c + github.com/golang/protobuf v1.4.2 github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26 - github.com/google/go-cmp v0.3.1 // indirect github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989 github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277 github.com/hashicorp/golang-lru v0.5.4 @@ -55,14 +54,14 @@ require ( github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect github.com/stretchr/testify v1.4.0 - github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d + github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 - golang.org/x/net v0.0.0-20200625001655-4c5254603344 // indirect + golang.org/x/net v0.0.0-20200822124328-c89045814202 // indirect golang.org/x/sync v0.0.0-20181108010431-42b317875d0f - golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd - golang.org/x/text v0.3.2 + golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8 + golang.org/x/text v0.3.3 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 diff --git a/go.sum b/go.sum old mode 100644 new mode 100755 index 4c46eeb5af..695a9d75c9 --- a/go.sum +++ b/go.sum @@ -67,6 +67,8 @@ github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepB github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= @@ -81,14 +83,22 @@ github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack 
v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c h1:zqAKixg3cTcIasAMJV+EcfVbWwLpOZ7LeoWJvcuD/5Q= -github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26 h1:lMm2hD9Fy0ynom5+85/pbdkiYcBqM1JWmhpAXLmy0fw= github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989 h1:giknQ4mEuDFmmHSrGcbargOuLHQGtywqo4mheITex54= github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277 h1:E0whKxgp2ojts0FDgUA8dl62bmH0LxKanMoBr6MDTDM= @@ -136,15 +146,19 @@ github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hz github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c h1:1RHs3tNxjXGHeul8z2t6H2N2TlAqpKe5yryJztRx4Jk= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= -github.com/onsi/ginkgo 
v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222 h1:goeTyGkArOZIVOMA0dQbyuPWGNQJZGPwPu/QS9GlpnA= @@ -184,8 +198,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs= -github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= +github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk= +github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -197,8 +211,10 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -206,14 +222,34 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8 h1:AvbQYmiaaaza3cW3QXRyPo5kYgpFIzOAfeAAN7m3qQ4= +golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= @@ -228,8 +264,10 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= From 05280a7ae3f47adc8aeb9130c7f5404a42fb3a55 Mon Sep 17 00:00:00 2001 From: libotony Date: Thu, 27 Aug 2020 17:33:45 +0800 Subject: [PATCH 025/105] eth/tracers: revert reason in call_tracer + error for failed internal calls (#21387) * tests: add testdata of call tracer * eth/tracers: return revert reason in call_tracer * eth/tracers: regenerate assets * eth/tracers: add error message even if no exec occurrs, fixes #21438 Co-authored-by: Martin Holst Swende --- eth/tracers/internal/tracers/assets.go | 12 ++-- eth/tracers/internal/tracers/call_tracer.js | 15 ++-- .../testdata/call_tracer_inner_instafail.json | 72 +++++++++++++++++++ .../testdata/call_tracer_revert_reason.json | 64 +++++++++++++++++ eth/tracers/tracers_test.go | 24 ++++++- 5 files changed, 172 insertions(+), 15 deletions(-) create mode 100644 eth/tracers/testdata/call_tracer_inner_instafail.json create mode 100644 eth/tracers/testdata/call_tracer_revert_reason.json diff --git a/eth/tracers/internal/tracers/assets.go b/eth/tracers/internal/tracers/assets.go index d0a0bf7c1a..c2da1ed1f8 100644 --- a/eth/tracers/internal/tracers/assets.go +++ b/eth/tracers/internal/tracers/assets.go @@ -2,8 +2,8 @@ // sources: // 4byte_tracer.js (2.933kB) // bigram_tracer.js (1.712kB) -// call_tracer.js (8.643kB) -// evmdis_tracer.js (4.194kB) +// call_tracer.js (8.704kB) +// evmdis_tracer.js (4.195kB) // noop_tracer.js (1.271kB) // opcount_tracer.js (1.372kB) // prestate_tracer.js (4.234kB) @@ -117,7 +117,7 @@ func bigram_tracerJs() (*asset, error) { return a, nil } -var _call_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x59\x5f\x6f\x1b\xb7\xb2\x7f\x96\x3e\xc5\x24\x0f\xb5\x84\x28\x92\x93\xf4\xf6\x02\x76\xd5\x0b\x5d\x47\x49\x0d\xb8\x71\x60\x2b\x0d\x82\x20\x0f\xd4\xee\xac\xc4\x9a\x4b\x6e\x49\xae\xe4\x3d\xa9\xbf\xfb\xc1\x0c\xb9\xab\xd5\x1f\x3b\x6e\x0f\xce\x41\xcf\x8b\xa0\x5d\xce\x0c\x87\x33\xbf\xf9\xc7\x1d\x8d\xe0\xcc\x14\x95\x95\x8b\xa5\x87\x97\xc7\x2f\xfe\x17\x66\x4b\x84\x85\x79\x8e\x7e\x89\x16\xcb\x1c\x26\xa5\x5f\x1a\xeb\xba\xa3\x11\xcc\x96\xd2\x41\x26\x15\x82\x74\x50\x08\xeb\xc1\x64\xe0\x77\xe8\x95\x9c\x5b\x61\xab\x61\x77\x34\x0a\x3c\x07\x97\x49\x42\x66\x11\xc1\x99\xcc\xaf\x85\xc5\x13\xa8\x4c\x09\x89\xd0\x60\x31\x95\xce\x5b\x39\x2f\x3d\x82\xf4\x20\x74\x3a\x32\x16\x72\x93\xca\xac\x22\x91\xd2\x43\xa9\x53\xb4\xbc\xb5\x47\x9b\xbb\x5a\x8f\xb7\xef\x3e\xc0\x05\x3a\x87\x16\xde\xa2\x46\x2b\x14\xbc\x2f\xe7\x4a\x26\x70\x21\x13\xd4\x0e\x41\x38\x28\xe8\x8d\x5b\x62\x0a\x73\x16\x47\x8c\x6f\x48\x95\xeb\xa8\x0a\xbc\x31\xa5\x4e\x85\x97\x46\x0f\x00\x25\x69\x0e\x2b\xb4\x4e\x1a\x0d\xaf\xea\xad\xa2\xc0\x01\x18\x4b\x42\x7a\xc2\xd3\x01\x2c\x98\x82\xf8\xfa\x20\x74\x05\x4a\xf8\x0d\xeb\x23\x0c\xb2\x39\x77\x0a\x52\xf3\x36\x4b\x53\x20\xf8\xa5\xf0\x74\xea\xb5\x54\x0a\xe6\x08\xa5\xc3\xac\x54\x03\x92\x36\x2f\x3d\x7c\x3c\x9f\xfd\x7c\xf9\x61\x06\x93\x77\x9f\xe0\xe3\xe4\xea\x6a\xf2\x6e\xf6\xe9\x14\xd6\xd2\x2f\x4d\xe9\x01\x57\x18\x44\xc9\xbc\x50\x12\x53\x58\x0b\x6b\x85\xf6\x15\x98\x8c\x24\xfc\x32\xbd\x3a\xfb\x79\xf2\x6e\x36\xf9\xff\xf3\x8b\xf3\xd9\x27\x30\x16\xde\x9c\xcf\xde\x4d\xaf\xaf\xe1\xcd\xe5\x15\x4c\xe0\xfd\xe4\x6a\x76\x7e\xf6\xe1\x62\x72\x05\xef\x3f\x5c\xbd\xbf\xbc\x9e\x0e\xe1\x1a\x49\x2b\x24\xfe\x6f\xdb\x3c\x63\xef\x59\x84\x14\xbd\x90\xca\xd5\x96\xf8\x64\x4a\x70\x4b\x53\xaa\x14\x96\x62\x85\x60\x31\x41\xb9\xc2\x14\x04\x24\xa6\xa8\x1e\xed\x54\x92\x25\x94\xd1\x0b\x3e\xf3\xbd\x80\x84\xf3\x0c\xb4\xf1\x03\x70\x88\xf0\xe3\xd2\xfb\xe2\x64\x34\x5a\xaf\xd7\xc3\x85\x2e\x87\xc6\x2e\x46\x2a\x88\x73\xa3\x9f\x86\x5d\x92\x99\x08\xa5\x66\x56\x24\x68\xc9\x39\x02\xb2\x92\xcc\xaf\xcc\x5a\x83\xb7\x42\x3b\x91\x90\xab\xe9\x7f\xc2\x60\x14\x1e\xf0\x96\x9e\xbc\x23\xd0\x82\xc5\xc2\x58\xfa\xaf\x54\x8d\x33\xa9\x3d\x5a\x2d\x14\xcb\x76\x90\x8b\x14\x61\x5e\x81\x68\x0b\x1c\xb4\x0f\x43\x30\x0a\xee\x06\xa9\x33\x63\x73\x86\xe5\xb0\xfb\xb5\xdb\x89\x1a\x3a\x2f\x92\x1b\x52\x90\xe4\x27\xa5\xb5\xa8\x3d\x99\xb2\xb4\x4e\xae\x90\x49\x20\xd0\x44\x7b\x4e\x7f\xfd\x05\xf0\x16\x93\x32\x48\xea\x34\x42\x4e\xe0\xf3\xd7\xbb\x2f\x83\x2e\x8b\x4e\xd1\x25\xa8\x53\x4c\xf9\x7c\x37\x0e\xd6\x4b\xb6\x28\xac\xf1\x68\x85\xf0\x5b\xe9\x7c\x8b\x26\xb3\x26\x07\xa1\xc1\x94\x84\xf8\xb6\x75\xa4\xf6\x86\x05\x0a\xfa\xaf\xd1\xb2\x46\xc3\x6e\xa7\x61\x3e\x81\x4c\x28\x87\x71\x5f\xe7\xb1\xa0\xd3\x48\xbd\x32\x37\x24\xd9\x58\x82\xb0\xad\xc0\x14\x89\x49\x63\x30\xd0\x39\x9a\x63\xa0\x1b\x76\x3b\xc4\x77\x02\x59\xa9\x79\xdb\x9e\x32\x8b\x01\xa4\xf3\x3e\x7c\xed\x76\x48\xec\x99\x28\x7c\x69\x91\xed\x89\xd6\x1a\xeb\x40\xe6\x39\xa6\x52\x78\x54\x55\xb7\xd3\x59\x09\x1b\x16\x60\x0c\xca\x2c\x86\x0b\xf4\x53\x7a\xec\xf5\x4f\xbb\x9d\x8e\xcc\xa0\x17\x56\x9f\x8c\xc7\x9c\x7d\x32\xa9\x31\x0d\xe2\x3b\x7e\x29\xdd\x30\x13\xa5\xf2\xcd\xbe\xc4\xd4\xb1\xe8\x4b\xab\xe9\xef\x5d\xd0\xe2\x23\x82\xd1\xaa\x82\x84\xb2\x8c\x98\x53\x78\xba\xca\x79\xcc\xe3\xe1\xdc\x00\x32\xe1\xc8\x84\x32\x83\x35\x42\x61\xf1\x79\xb2\x44\xf2\x9d\x4e\x30\x6a\xe9\x2a\xc7\x4e\x1d\x03\xed\x36\x34\xc5\xd0\x9b\x77\x65\x3e\x47\xdb\xeb\xc3\x77\x70\x7c\x9b\x1d\xf7\x61\x3c\xe6\x3f\xb5\xee\x91\x27\xea\x4b\x52\x4c\x11\x0f\xca\xfc\xd7\xde\x4a\xbd\x08\x67\x8d\xba\x9e\x67\x20\x40\xe3\x1a\x12\xa3\x19\xd4\xe4\x95\x39\x4a\xbd\x80\xc
4\xa2\xf0\x98\x0e\x40\xa4\x29\x78\x13\x90\xd7\xe0\x6c\x7b\x4b\xf8\xee\x3b\xe8\xd1\x66\x63\x38\x3a\xbb\x9a\x4e\x66\xd3\x23\xf8\xe3\x0f\x08\x6f\x9e\x86\x37\x2f\x9f\xf6\x5b\x9a\x49\x7d\x99\x65\x51\x39\x16\x38\x2c\x10\x6f\x7a\x2f\xfa\xc3\x95\x50\x25\x5e\x66\x41\xcd\x48\x3b\xd5\x29\x8c\x23\xcf\xb3\x5d\x9e\x97\x5b\x3c\xc4\x34\x1a\xc1\xc4\x39\xcc\xe7\x0a\xf7\x03\x32\x46\x2c\x07\xaf\xf3\x94\xb1\x08\x7d\x89\xc9\x0b\x85\x84\xaa\x7a\xd7\x68\x7e\xd6\xb8\xe3\xab\x02\x4f\x00\x00\x4c\x31\xe0\x17\x14\x0b\xfc\xc2\x9b\x9f\xf1\x96\x7d\x54\x9b\x90\x50\x35\x49\x53\x8b\xce\xf5\xfa\xfd\x40\x2e\x75\x51\xfa\x93\x2d\xf2\x1c\x73\x63\xab\xa1\xa3\x84\xd4\xe3\xa3\x0d\xc2\x49\x6b\x9e\x85\x70\xe7\x9a\x78\x22\x52\xdf\x0a\xd7\xdb\x2c\x9d\x19\xe7\x4f\xea\x25\x7a\xa8\xd7\xd8\x16\xc4\x76\x74\x7c\x7b\xb4\x6f\xad\xe3\xfe\x06\x09\x2f\x7e\xe8\x13\xcb\xdd\x69\x83\xef\x26\x4d\x0c\x8b\xd2\x2d\x7b\x0c\xa7\xcd\xea\x26\x15\x8c\xc1\xdb\x12\x0f\xc2\x9f\x21\xb5\x0f\x27\x87\x2a\xa3\x5c\xe2\x6d\x99\x30\xac\x16\x82\x33\x0d\x47\xba\xa0\xcc\xeb\xca\x39\xdb\xdc\x1b\xb3\x8f\xae\x08\xae\xeb\xe9\xc5\x9b\xd7\xd3\xeb\xd9\xd5\x87\xb3\xd9\x51\x0b\x4e\x0a\x33\x4f\x4a\x6d\x9f\x41\xa1\x5e\xf8\x25\xeb\x4f\xe2\xb6\x57\x3f\x13\xcf\xf3\x17\x5f\xc2\x1b\x18\x1f\x08\xf9\xce\xc3\x1c\xf0\xf9\x0b\xcb\xbe\xdb\x37\xdf\x36\x69\x30\xe6\xd7\x00\x22\x53\xdc\xb5\x13\xc7\x81\x58\xcc\xd1\x2f\x4d\xca\xc9\x31\x11\x21\xbf\xd6\x56\x4c\x8d\xc6\x3f\x1f\x91\x93\x8b\x8b\x56\x3c\xf2\xf3\xd9\xe5\xeb\x76\x8c\x1e\xbd\x9e\x5e\x4c\xdf\x4e\x66\xd3\x5d\xda\xeb\xd9\x64\x76\x7e\xc6\x6f\xeb\xf0\x1d\x8d\xe0\xfa\x46\x16\x9c\x65\x39\x77\x99\xbc\xe0\x76\xb1\xd1\xd7\x0d\xc0\x2f\x0d\x35\x62\x36\x16\x91\x4c\xe8\xa4\x4e\xee\xae\x76\x9a\x37\xe4\x32\x53\xc7\xca\x7e\x2a\x68\x03\xb5\xdf\xb8\x51\xba\xf7\x16\xe3\xa6\x69\xcf\x9b\x5a\xaf\x8d\x41\x83\x47\x38\x01\x72\x92\xe9\x3d\xfe\x90\xf0\x7f\x70\x0c\x27\xf0\x22\x66\x92\x07\x52\xd5\x4b\x78\x46\xe2\xff\x42\xc2\x7a\x75\x80\xf3\xef\x99\xb6\xbc\x61\xe2\x9a\xdc\x9b\xff\x7c\x3a\x33\xa5\xbf\xcc\xb2\x13\xd8\x35\xe2\xf7\x7b\x46\x6c\xe8\x2f\x50\xef\xd3\xff\xcf\x1e\xfd\x26\xf5\x11\xaa\x4c\x01\x4f\xf6\x20\x12\x12\xcf\x93\x9d\x38\x88\xc6\xe5\x16\x87\xa5\xc1\xf8\x9e\x64\xfb\x72\x1b\xc3\xf7\x65\x8b\x7f\x29\xd9\x1e\x6c\xd5\xa8\x21\xdb\x6e\xc6\x06\x60\xd1\x5b\x89\x2b\x1a\xb7\x8e\x1c\x8b\xa4\xa6\xd5\xac\x85\x4e\x70\x08\x1f\x31\x48\xd4\x88\x9c\x5c\x62\x93\x4b\x3d\x0a\xf7\x7d\xd4\xa8\xc6\x71\x85\x21\x26\xb8\x17\xb5\x08\xb9\xa8\x68\x5c\xc9\x4a\x7d\x53\xc1\x42\x38\x48\x2b\x2d\x72\x99\xb8\x20\x8f\x1b\x5c\x8b\x0b\x61\x59\xac\xc5\xdf\x4b\x74\x34\xfb\x10\x90\x45\xe2\x4b\xa1\x54\x05\x0b\x49\x03\x0c\x71\xf7\x5e\xbe\x3a\x3e\x06\xe7\x65\x81\x3a\x1d\xc0\x0f\xaf\x46\x3f\x7c\x0f\xb6\x54\xd8\x1f\x76\x5b\x69\xbc\x39\x6a\xf4\x06\x2d\x44\xf4\xbc\xc6\xc2\x2f\x7b\x7d\xf8\xe9\x9e\x7a\x70\x4f\x72\x3f\x48\x0b\xcf\xe1\xc5\x97\x21\xe9\x35\xde\xc2\x6d\xf0\x24\xa0\x72\x18\xa5\xd1\xd0\x77\xf9\xfa\xb2\x77\x23\xac\x50\x62\x8e\xfd\x13\x1e\x02\xd9\x56\x6b\x11\xa7\x00\x72\x0a\x14\x4a\x48\x0d\x22\x49\x4c\xa9\x3d\x19\xbe\x6e\xe8\x55\x45\xf9\xfd\xc8\xd7\xf2\x78\x5e\x12\x49\x82\xce\xd5\xe9\x9e\xbd\x46\xea\x88\x9c\xb8\x41\x6a\x27\x53\x6c\x79\x85\xb2\x83\xe1\xd4\x1c\x29\x68\x9c\xac\x05\xe6\xc6\xd1\x26\x73\x84\xb5\xa5\xe1\xc3\x49\x9d\xf0\xf4\x9d\x22\x59\xdb\x81\xd1\x20\x40\x19\x1e\xf9\x39\xc6\x41\xd8\x85\x1b\x86\x7c\x4f\xdb\x52\xce\xd1\x66\x3d\xdc\x06\x72\x1b\xaa\xdc\xe6\xef\xb4\x03\x1a\xf0\x56\x3a\xcf\x5d\x25\x69\x29\x1d\x04\x24\x4b\xbd\x18\x40\x61\x0a\xce\xd3\xdf\x2a\x67\x31\x59\x5f\x4d\x7f\x9d\x5e\x35\xc5\xff\xf1\x4e\xac\xfb\xfe\xa7\xcd\x58\x04\x96\x66\x0e\x8f\xe9\xd3\x03\x8d\xfc\x01\x40\x8d\xef\x01\x14\xc9\xdf\x
d4\xc6\xf7\xad\xe3\x28\xe1\xfc\xc6\x31\x0b\x0c\x33\x4d\x5b\x01\x57\x2a\xef\x76\x72\xf7\x6e\x72\x30\x45\x5d\x21\x48\x29\x4e\x3b\x94\xd8\x77\xbb\xed\xad\x85\x4d\xd3\xbd\xc1\xe7\x79\xcb\xc6\x6b\x6e\xb9\x02\x51\x2b\x35\xf0\x7a\xdd\xbb\x89\x50\x0d\x58\x77\x53\x7a\x82\x03\xd5\xef\x4d\xf2\x5b\x08\xf7\xc1\xb1\xd7\x63\xfa\x9b\xcb\xc5\xb9\xf6\xbd\x7a\xf1\x5c\xc3\x73\xa8\x1f\x28\xa9\xc3\xf3\xad\x28\x3a\x90\x1d\x3b\x29\x2a\xf4\x08\x1b\x11\xa7\xb0\xf3\x8a\x04\x05\x73\xb0\xd1\x2c\xfa\xfd\xe2\x7c\x1c\xa5\x91\xc1\x9e\x58\xf4\x43\xfc\xbd\x14\xca\xf5\x8e\x9b\x66\x21\x9c\xc0\x1b\x2e\x6f\xe3\xa6\xc0\xd5\x15\x90\x78\xb6\xda\x8f\x28\x30\xb0\x45\x6b\xd4\x6c\xe9\x3c\x54\xad\x14\x1f\x94\x10\x45\xc4\xb4\xd1\xf8\x32\x02\xf3\x50\xff\xd9\x69\x13\xc0\xd3\xa6\x21\xc8\x84\x54\xa5\xc5\xa7\xa7\x70\x20\xed\xb8\xd2\x66\x22\x61\x5f\x3a\x04\x9e\x58\x1d\x38\x93\xe3\xd2\xac\x83\x02\x87\x92\xd7\x3e\x38\x1a\x1c\xec\x94\x0f\xbe\x7a\x11\x0e\x4a\x27\x16\xd8\x02\x47\x63\xf0\xda\x51\x07\xc7\xe8\xbf\x0c\x9d\x67\xcd\xe3\x37\x50\x14\x76\xf9\x26\x34\x1e\xc2\xc6\x41\x2f\xef\x75\x39\x35\x11\xf7\x3a\xad\x87\x5a\xd5\xd0\x8a\x34\xc8\xf9\x33\x7e\xff\xf7\x38\x3e\x78\x3e\xfe\x3e\x36\xd0\x76\x69\xc3\x19\xb7\x89\xc3\x49\x37\xed\xcd\xb7\x51\xd0\xac\xde\x07\x80\xfb\x3a\x27\x82\xaa\xfe\x0d\x13\xbf\x81\x2b\x37\x3b\xf4\x54\x58\x5c\x49\x53\x52\x1d\xc3\xff\xa6\xc9\xb0\xe9\xfc\xee\xba\x9d\xbb\x78\x45\xc6\xee\x6b\xdf\x91\xad\x97\xf1\x8a\x37\x34\x4d\xad\x2a\x62\xb8\xc4\xc6\x9b\xb3\x2c\x5c\xbe\x76\x98\xff\x81\xbb\xb2\x18\xef\xde\x14\xd4\x15\xc4\x22\xa5\x2c\x8a\xb4\x6a\xea\xe2\x20\xf4\x23\xb0\x14\x3a\x8d\x33\x89\x48\x53\x49\xf2\x18\x8b\xa4\xa1\x58\x08\xa9\xbb\x07\xcd\xf8\xcd\x62\x7c\x08\x19\x7b\x2d\x6e\xbb\x9e\xc6\x59\x92\x06\x3f\xd6\xb8\xfb\x88\xba\xb9\x13\x4b\xbb\xd7\x7e\xf1\xe6\xd0\x68\x57\xe6\xdc\x10\x83\x58\x09\xa9\x04\x0d\x61\xdc\x68\xe9\x14\x12\x85\x42\x87\xcb\x7e\xcc\xbc\x59\xa1\x75\xdd\x47\x80\xfc\xaf\x60\x7c\x27\x39\xd6\x8f\xd1\x1c\x8f\x8f\xd9\xc7\x46\x6c\x38\xfe\x1b\x25\xbc\x8f\xf0\x6a\x99\x37\x44\x96\xf4\xfc\x1d\x08\xb5\xef\x3e\x2e\xa4\xb8\x75\x22\x9a\x9f\xe0\xb8\xd5\x9e\xff\x5d\x82\x6c\x1f\x62\x17\x4d\x9b\x16\x0f\xef\x8d\x19\x80\x42\xc1\xc3\x52\xfd\x95\xa6\x6e\x4b\x1f\x9a\xdd\xea\xe8\x0d\x8d\xdd\x5e\xf8\xf2\xf5\xd6\x12\xeb\x8b\x90\xd0\xe1\xcf\x11\x35\x48\x8f\x56\xd0\x58\x44\xe8\x8a\x1f\x16\x48\x4b\xc7\xe2\xd8\x2f\x92\x82\x2e\x0a\x8e\xb7\xfc\x54\x9f\xa5\x5e\x0c\xbb\x9d\xf0\xbe\x15\xef\x89\xbf\xdd\xc4\x7b\x28\x86\xcc\x19\xaf\x06\x9a\x9b\x81\xc4\xdf\x72\xd3\xc8\xd3\xf3\xce\xf5\x00\xad\xd1\xab\x30\x5a\xef\x5c\x06\x30\x63\xbc\x10\xd8\xbd\x73\xa4\x35\x7e\xb7\x05\x70\x26\x5d\x08\x17\xc4\xec\x84\x84\xbf\xdd\x8f\x88\x9a\x81\x82\xe1\xe4\x30\x03\x2d\x1d\x60\xda\xb9\xa0\x20\x62\x7e\x15\x56\x43\x61\x3f\x69\xaf\x86\x57\xf1\xa0\x32\x6f\xd9\x46\xe6\x6c\x9b\xbb\xd3\xc3\x49\xee\xb8\xc6\xe3\xe1\x64\x46\x36\x6f\x00\x7b\x0f\x6b\x7b\xe4\xd8\x27\x79\x28\x55\xb2\xf4\x3a\xb3\xdd\xc3\xca\xd2\x5b\xad\x87\xbf\x7d\xbc\xc8\x86\xb8\xad\xe2\x16\xcd\x21\x21\x31\xcf\x44\xba\x60\xd9\x5a\x40\x40\x75\xd0\x95\x11\x2d\xff\x81\x51\x62\x3b\x7e\xea\x25\xb0\x18\xbe\x43\x70\x43\x4a\xe1\x63\xe6\x5c\xfc\x4b\x47\xd3\xe4\x26\x2e\x52\x74\xd2\x62\x0a\x99\x44\x95\x82\x49\xd1\xf2\xac\xfa\x9b\x33\x3a\x7c\x71\x42\x2b\x49\x62\xf8\xb2\x16\x3e\x72\xf3\xf7\x3e\x2d\x13\xf4\x15\x64\x28\xf8\xd3\x91\x37\x50\x08\xe7\x20\x47\x41\xd3\x69\x56\x2a\x55\x81\xb1\x29\x92\xf0\x66\x5c\xa3\x90\x34\x50\x3a\xb4\x0e\xd6\x4b\x13\xcb\x24\x77\x69\x05\x35\x9d\xd2\x0f\xe2\x8d\x8c\x74\x85\x12\x15\x48\x4f\x25\x39\x1e\xaa\x1d\xa5\xcd\xf7\x1a\xfe\xe8\x63\xa8\xea\xee\x87\x68\x3d\xd8\x6d\xc7\x28\xbf\xa6\xa7\xed\xe8\x8c\x73\xcd\x76\x5c\
x6e\xee\xaa\xb6\x83\xb0\x2e\x1b\xdb\x91\xd6\x2e\x42\xdb\xe1\xc4\x2b\xfc\xb4\x1d\x48\xad\x7e\x99\x17\x18\x1c\x0d\x03\x3f\xed\x84\x16\x6b\x19\x63\x2b\x7c\x9d\x6c\xc8\xf9\x69\x10\x01\x43\x5e\xec\x91\x71\x6e\xb0\xa2\x4c\x1c\x6c\xd4\x2a\x2b\xe1\xc5\xe7\x1b\xac\xbe\x1c\xae\x22\x11\x8e\x2d\xba\xa6\x6c\xd4\x90\x0e\x6b\x0f\x04\x72\xa3\x85\x1c\x1f\x9f\x82\xfc\xb1\xcd\x50\x57\x3e\x90\xcf\x9e\xd5\x7b\xb6\xd7\x3f\xcb\x2f\x75\x74\x36\x88\xdf\x59\xef\x6f\x69\x14\x63\x24\xd0\x50\x50\x74\xef\xba\xff\x0c\x00\x00\xff\xff\x00\x24\x55\x1f\xc3\x21\x00\x00") +var _call_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x59\xdf\x73\xdb\x36\xf2\x7f\x96\xfe\x8a\x4d\x1e\x6a\x69\xa2\x48\x4a\xd2\x6f\xbf\x33\x76\xd5\x1b\x9d\xa3\xa4\x9e\x71\xe3\x8c\xad\x34\x93\xc9\xe4\x01\x22\x97\x12\x6a\x08\x60\x01\xd0\x32\x2f\xf5\xff\x7e\xb3\x0b\x90\x22\x25\xd9\xf1\xf5\x6e\x6e\x7a\x6f\x24\xb1\xbb\x58\xec\x7e\xf6\x17\x38\x1a\xc1\xa9\xc9\x4b\x2b\x97\x2b\x0f\x2f\xc7\x2f\xfe\x1f\xe6\x2b\x84\xa5\x79\x8e\x7e\x85\x16\x8b\x35\x4c\x0b\xbf\x32\xd6\x75\x47\x23\x98\xaf\xa4\x83\x4c\x2a\x04\xe9\x20\x17\xd6\x83\xc9\xc0\xef\xd0\x2b\xb9\xb0\xc2\x96\xc3\xee\x68\x14\x78\x0e\x2e\x93\x84\xcc\x22\x82\x33\x99\xdf\x08\x8b\xc7\x50\x9a\x02\x12\xa1\xc1\x62\x2a\x9d\xb7\x72\x51\x78\x04\xe9\x41\xe8\x74\x64\x2c\xac\x4d\x2a\xb3\x92\x44\x4a\x0f\x85\x4e\xd1\xf2\xd6\x1e\xed\xda\x55\x7a\xbc\x7d\xf7\x01\xce\xd1\x39\xb4\xf0\x16\x35\x5a\xa1\xe0\x7d\xb1\x50\x32\x81\x73\x99\xa0\x76\x08\xc2\x41\x4e\x5f\xdc\x0a\x53\x58\xb0\x38\x62\x7c\x43\xaa\x5c\x45\x55\xe0\x8d\x29\x74\x2a\xbc\x34\x7a\x00\x28\x49\x73\xb8\x41\xeb\xa4\xd1\xf0\xaa\xda\x2a\x0a\x1c\x80\xb1\x24\xa4\x27\x3c\x1d\xc0\x82\xc9\x89\xaf\x0f\x42\x97\xa0\x84\xdf\xb2\x3e\xc2\x20\xdb\x73\xa7\x20\x35\x6f\xb3\x32\x39\x82\x5f\x09\x4f\xa7\xde\x48\xa5\x60\x81\x50\x38\xcc\x0a\x35\x20\x69\x8b\xc2\xc3\xc7\xb3\xf9\xcf\x17\x1f\xe6\x30\x7d\xf7\x09\x3e\x4e\x2f\x2f\xa7\xef\xe6\x9f\x4e\x60\x23\xfd\xca\x14\x1e\xf0\x06\x83\x28\xb9\xce\x95\xc4\x14\x36\xc2\x5a\xa1\x7d\x09\x26\x23\x09\xbf\xcc\x2e\x4f\x7f\x9e\xbe\x9b\x4f\xff\x7e\x76\x7e\x36\xff\x04\xc6\xc2\x9b\xb3\xf9\xbb\xd9\xd5\x15\xbc\xb9\xb8\x84\x29\xbc\x9f\x5e\xce\xcf\x4e\x3f\x9c\x4f\x2f\xe1\xfd\x87\xcb\xf7\x17\x57\xb3\x21\x5c\x21\x69\x85\xc4\xff\x6d\x9b\x67\xec\x3d\x8b\x90\xa2\x17\x52\xb9\xca\x12\x9f\x4c\x01\x6e\x65\x0a\x95\xc2\x4a\xdc\x20\x58\x4c\x50\xde\x60\x0a\x02\x12\x93\x97\x8f\x76\x2a\xc9\x12\xca\xe8\x25\x9f\xf9\x5e\x40\xc2\x59\x06\xda\xf8\x01\x38\x44\xf8\x71\xe5\x7d\x7e\x3c\x1a\x6d\x36\x9b\xe1\x52\x17\x43\x63\x97\x23\x15\xc4\xb9\xd1\x4f\xc3\x2e\xc9\x4c\x84\x52\x73\x2b\x12\xb4\xe4\x1c\x01\x59\x41\xe6\x57\x66\xa3\xc1\x5b\xa1\x9d\x48\xc8\xd5\xf4\x9c\x30\x18\x85\x07\xbc\xa5\x37\xef\x08\xb4\x60\x31\x37\x96\x9e\x95\xaa\x70\x26\xb5\x47\xab\x85\x62\xd9\x0e\xd6\x22\x45\x58\x94\x20\x9a\x02\x07\xcd\xc3\x10\x8c\x82\xbb\x41\xea\xcc\xd8\x35\xc3\x72\xd8\xfd\xda\xed\x44\x0d\x9d\x17\xc9\x35\x29\x48\xf2\x93\xc2\x5a\xd4\x9e\x4c\x59\x58\x27\x6f\x90\x49\x20\xd0\x44\x7b\xce\x7e\xfd\x05\xf0\x16\x93\x22\x48\xea\xd4\x42\x8e\xe1\xf3\xd7\xbb\x2f\x83\x2e\x8b\x4e\xd1\x25\xa8\x53\x4c\xf9\x7c\xd7\x0e\x36\x2b\xb6\x28\x6c\xf0\xe8\x06\xe1\xb7\xc2\xf9\x06\x4d\x66\xcd\x1a\x84\x06\x53\x10\xe2\x9b\xd6\x91\xda\x1b\x16\x28\xe8\x59\xa3\x65\x8d\x86\xdd\x4e\xcd\x7c\x0c\x99\x50\x0e\xe3\xbe\xce\x63\x4e\xa7\x91\xfa\xc6\x5c\x93\x64\x63\x09\xc2\xb6\x04\x93\x27\x26\x8d\xc1\x40\xe7\xa8\x8f\x81\x6e\xd8\xed\x10\xdf\x31\x64\x85\xe6\x6d\x7b\xca\x2c\x07\x90\x2e\xfa\xf0\xb5\xdb\x21\xb1\xa7\x22\xf7\x85\x45\xb6\x27\x5a\x6b\xac\x03\xb9\x5e\x63\x2a\x85\x47\x55\x76\x3b\x9d\x1b\x61\xc3\x02\x4c\x40\x99\xe5\x70\x89\x7e\x46\xaf\xbd\xf
e\x49\xb7\xd3\x91\x19\xf4\xc2\xea\x93\xc9\x84\xb3\x4f\x26\x35\xa6\x41\x7c\xc7\xaf\xa4\x1b\x66\xa2\x50\xbe\xde\x97\x98\x3a\x16\x7d\x61\x35\x3d\xde\x05\x2d\x3e\x22\x18\xad\x4a\x48\x28\xcb\x88\x05\x85\xa7\x2b\x9d\xc7\x75\x3c\x9c\x1b\x40\x26\x1c\x99\x50\x66\xb0\x41\xc8\x2d\x3e\x4f\x56\x48\xbe\xd3\x09\x46\x2d\x5d\xe9\xd8\xa9\x13\xa0\xdd\x86\x26\x1f\x7a\xf3\xae\x58\x2f\xd0\xf6\xfa\xf0\x1d\x8c\x6f\xb3\x71\x1f\x26\x13\x7e\xa8\x74\x8f\x3c\x51\x5f\x92\x62\xf2\x78\x50\xe6\xbf\xf2\x56\xea\x65\x38\x6b\xd4\xf5\x2c\x03\x01\x1a\x37\x90\x18\xcd\xa0\x26\xaf\x2c\x50\xea\x25\x24\x16\x85\xc7\x74\x00\x22\x4d\xc1\x9b\x80\xbc\x1a\x67\xed\x2d\xe1\xbb\xef\xa0\x47\x9b\x4d\xe0\xe8\xf4\x72\x36\x9d\xcf\x8e\xe0\x8f\x3f\x20\x7c\x79\x1a\xbe\xbc\x7c\xda\x6f\x68\x26\xf5\x45\x96\x45\xe5\x58\xe0\x30\x47\xbc\xee\xbd\xe8\x0f\x6f\x84\x2a\xf0\x22\x0b\x6a\x46\xda\x99\x4e\x61\x12\x79\x9e\xed\xf2\xbc\x6c\xf1\x10\xd3\x68\x04\x53\xe7\x70\xbd\x50\xb8\x1f\x90\x31\x62\x39\x78\x9d\xa7\x8c\x45\xe8\x4b\xcc\x3a\x57\x48\xa8\xaa\x76\x8d\xe6\x67\x8d\x3b\xbe\xcc\xf1\x18\x00\xc0\xe4\x03\xfe\x40\xb1\xc0\x1f\xbc\xf9\x19\x6f\xd9\x47\x95\x09\x09\x55\xd3\x34\xb5\xe8\x5c\xaf\xdf\x0f\xe4\x52\xe7\x85\x3f\x6e\x91\xaf\x71\x6d\x6c\x39\x74\x94\x90\x7a\x7c\xb4\x41\x38\x69\xc5\xb3\x14\xee\x4c\x13\x4f\x44\xea\x5b\xe1\x7a\xdb\xa5\x53\xe3\xfc\x71\xb5\x44\x2f\xd5\x1a\xdb\x82\xd8\x8e\xc6\xb7\x47\xfb\xd6\x1a\xf7\xb7\x48\x78\xf1\x43\x9f\x58\xee\x4e\x6a\x7c\xd7\x69\x62\x98\x17\x6e\xd5\x63\x38\x6d\x57\xb7\xa9\x60\x02\xde\x16\x78\x10\xfe\x0c\xa9\x7d\x38\x39\x54\x19\xe5\x12\x6f\x8b\x84\x61\xb5\x14\x9c\x69\x38\xd2\x05\x65\x5e\x57\x2c\xd8\xe6\xde\x98\x7d\x74\x45\x70\x5d\xcd\xce\xdf\xbc\x9e\x5d\xcd\x2f\x3f\x9c\xce\x8f\x1a\x70\x52\x98\x79\x52\xaa\x7d\x06\x85\x7a\xe9\x57\xac\x3f\x89\x6b\xaf\x7e\x26\x9e\xe7\x2f\xbe\x84\x2f\x30\x39\x10\xf2\x9d\x87\x39\xe0\xf3\x17\x96\x7d\xb7\x6f\xbe\x36\x69\x30\xe6\xd7\x00\x22\x93\xdf\x35\x13\xc7\x81\x58\x5c\xa3\x5f\x99\x94\x93\x63\x22\x42\x7e\xad\xac\x98\x1a\x8d\xff\x7a\x44\x4e\xcf\xcf\x1b\xf1\xc8\xef\xa7\x17\xaf\x9b\x31\x7a\xf4\x7a\x76\x3e\x7b\x3b\x9d\xcf\x76\x69\xaf\xe6\xd3\xf9\xd9\x29\x7f\xad\xc2\x77\x34\x82\xab\x6b\x99\x73\x96\xe5\xdc\x65\xd6\x39\xb7\x8b\xb5\xbe\x6e\x00\x7e\x65\xa8\x11\xb3\xb1\x88\x64\x42\x27\x55\x72\x77\x95\xd3\xbc\x21\x97\x99\x2a\x56\xf6\x53\x41\x13\xa8\xfd\xda\x8d\xd2\xbd\xb7\x18\x37\x4d\x7b\xde\x54\x7a\x6d\x0d\x1a\x3c\xc2\x09\x90\x93\x4c\xef\xf1\x87\x84\xbf\xc1\x18\x8e\xe1\x45\xcc\x24\x0f\xa4\xaa\x97\xf0\x8c\xc4\xff\x89\x84\xf5\xea\x00\xe7\x5f\x33\x6d\x79\xc3\xc4\x15\xb9\x37\xff\xfd\x74\x66\x0a\x7f\x91\x65\xc7\xb0\x6b\xc4\xef\xf7\x8c\x58\xd3\x9f\xa3\xde\xa7\xff\xbf\x3d\xfa\x6d\xea\x23\x54\x99\x1c\x9e\xec\x41\x24\x24\x9e\x27\x3b\x71\x10\x8d\xcb\x2d\x0e\x4b\x83\xc9\x3d\xc9\xf6\x65\x1b\xc3\xf7\x65\x8b\x7f\x2b\xd9\x1e\x6c\xd5\xa8\x21\x6b\x37\x63\x03\xb0\xe8\xad\xc4\x1b\x1a\xb7\x8e\x1c\x8b\xa4\xa6\xd5\x6c\x84\x4e\x70\x08\x1f\x31\x48\xd4\x88\x9c\x5c\x62\x93\x4b\x3d\x0a\xf7\x7d\xd4\xa8\xc6\x71\x85\x21\x26\xb8\x17\xb5\x08\x6b\x51\xd2\xb8\x92\x15\xfa\xba\x84\xa5\x70\x90\x96\x5a\xac\x65\xe2\x82\x3c\x6e\x70\x2d\x2e\x85\x65\xb1\x16\x7f\x2f\xd0\xd1\xec\x43\x40\x16\x89\x2f\x84\x52\x25\x2c\x25\x0d\x30\xc4\xdd\x7b\xf9\x6a\x3c\x06\xe7\x65\x8e\x3a\x1d\xc0\x0f\xaf\x46\x3f\x7c\x0f\xb6\x50\xd8\x1f\x76\x1b\x69\xbc\x3e\x6a\xf4\x06\x2d\x44\xf4\xbc\xc6\xdc\xaf\x7a\x7d\xf8\xe9\x9e\x7a\x70\x4f\x72\x3f\x48\x0b\xcf\xe1\xc5\x97\x21\xe9\x35\x69\xe1\x36\x78\x12\x50\x39\x8c\xd2\x68\xe8\xbb\x78\x7d\xd1\xbb\x16\x56\x28\xb1\xc0\xfe\x31\x0f\x81\x6c\xab\x8d\x88\x53\x00\x39\x05\x72\x25\xa4\x06\x91\x24\xa6\xd0\x9e\x0c\x5f\x35\xf4\xaa\xa4\xfc\x7e\xe4\x2b\x79\x
3c\x2f\x89\x24\x41\xe7\xaa\x74\xcf\x5e\x23\x75\xc4\x9a\xb8\x41\x6a\x27\x53\x6c\x78\x85\xb2\x83\xe1\xd4\x1c\x29\x68\x9c\xac\x04\xae\x8d\xa3\x4d\x16\x08\x1b\x4b\xc3\x87\x93\x3a\xe1\xe9\x3b\x45\xb2\xb6\x03\xa3\x41\x80\x32\x3c\xf2\x73\x8c\x83\xb0\x4b\x37\x0c\xf9\x9e\xb6\xa5\x9c\xa3\xcd\x66\xd8\x06\x72\x13\xaa\xdc\xe6\xef\xb4\x03\x1a\xf0\x56\x3a\xcf\x5d\x25\x69\x29\x1d\x04\x24\x4b\xbd\x1c\x40\x6e\x72\xce\xd3\xdf\x2a\x67\x31\x59\x5f\xce\x7e\x9d\x5d\xd6\xc5\xff\xf1\x4e\xac\xfa\xfe\xa7\xf5\x58\x04\x96\x66\x0e\x8f\xe9\xd3\x03\x8d\xfc\x01\x40\x4d\xee\x01\x14\xc9\xdf\xd6\xc6\xf7\x8d\xe3\x28\xe1\xfc\xd6\x31\x4b\x0c\x33\x4d\x53\x01\x57\x28\xef\x76\x72\xf7\x6e\x72\x30\x79\x55\x21\x48\x29\x4e\x3b\x94\xd8\x77\xbb\xed\xd6\xc2\xb6\xe9\xde\xe2\xf3\xac\x61\xe3\x0d\xb7\x5c\x81\xa8\x91\x1a\x78\xbd\xea\xdd\x44\xa8\x06\xac\xbb\x29\x3c\xc1\x81\xea\xf7\x36\xf9\x2d\x85\xfb\xe0\xd8\xeb\x31\xfd\x2d\xe4\xf2\x4c\xfb\x5e\xb5\x78\xa6\xe1\x39\x54\x2f\x94\xd4\xe1\x79\x2b\x8a\x0e\x64\xc7\x4e\x8a\x0a\x3d\xc2\x56\xc4\x09\xec\x7c\x22\x41\xc1\x1c\x6c\x34\x8b\x7e\xbf\x38\x8f\xa3\x34\x32\xd8\x13\x8b\x7e\x88\xbf\x17\x42\xb9\xde\xb8\x6e\x16\xc2\x09\xbc\xe1\xf2\x36\xa9\x0b\x5c\x55\x01\x89\xa7\xd5\x7e\x44\x81\x81\x2d\x5a\xa3\x62\x4b\x17\xa1\x6a\xa5\xf8\xa0\x84\x28\x22\xa6\x8d\xda\x97\x11\x98\x87\xfa\xcf\x4e\x93\x00\x9e\xd6\x0d\x41\x26\xa4\x2a\x2c\x3e\x3d\x81\x03\x69\xc7\x15\x36\x13\x09\xfb\xd2\x21\xf0\xc4\xea\xc0\x99\x35\xae\xcc\x26\x28\x70\x28\x79\xed\x83\xa3\xc6\xc1\x4e\xf9\xe0\xab\x17\xe1\xa0\x70\x62\x89\x0d\x70\xd4\x06\xaf\x1c\x75\x70\x8c\xfe\xd3\xd0\x79\x56\xbf\x3e\x02\x45\x77\xff\x19\x78\xec\xf8\x79\xaf\xcf\xa9\x88\xb8\xdb\x69\xbc\x54\xca\x86\x66\xe4\xaf\xe5\xf8\x47\x47\xd8\x2e\x6d\x38\x5a\x9b\x38\x1c\x70\xdb\xd7\x7c\xdb\xfd\xf5\xea\x7d\x9e\xbf\xaf\x65\x22\x8c\xea\xdf\x30\xf1\x5b\x9c\x72\x97\x43\x6f\xb9\xc5\x1b\x69\x0a\x2a\x60\xf8\xbf\x34\x12\xd6\x2d\xdf\x5d\xb7\x73\x17\xef\xc6\xd8\x6f\xcd\xcb\xb1\xcd\x2a\xde\xed\x86\x6e\xa9\x51\x3e\x0c\xd7\xd6\x78\x65\x96\x85\x5b\xd7\x0e\xf3\x3f\x70\x49\x16\x03\xdd\x9b\x9c\xda\x81\x58\x9d\x94\x45\x91\x96\x75\x41\x1c\x84\x46\x04\x56\x42\xa7\x71\x18\x11\x69\x2a\x49\x1e\x83\x90\x34\x14\x4b\x21\x75\xf7\xa0\x19\xbf\x59\x85\x0f\x21\x63\xaf\xb7\x6d\x16\xd2\x38\x44\xd2\xc4\xc7\x1a\x77\x1f\x51\x30\x77\x82\x68\xf7\xbe\x2f\x5e\x19\x1a\xed\x8a\x35\x77\xc2\x20\x6e\x84\x54\x82\xa6\x2f\xee\xb0\x74\x0a\x89\x42\xa1\xc3\x2d\x3f\x66\xde\xdc\xa0\x75\xdd\x47\x80\xfc\xcf\x60\x7c\x27\x2b\x56\xaf\xd1\x1c\x8f\x8f\xd9\xc7\x46\x6c\x38\xfe\x1b\x25\xbc\x8f\xf0\x6a\x98\x37\x44\x96\xf4\xfc\x03\x08\xb5\xef\x3e\x2e\xa4\xb8\x67\x22\x9a\x9f\x60\xdc\xe8\xcb\xff\x2a\x41\xb6\x0f\xb1\xf3\xba\x3f\x8b\x87\xf7\xc6\x0c\x40\xa1\xe0\x29\xa9\xfa\x3d\x53\xf5\xa3\x0f\x0d\x6d\x55\xf4\x86\x8e\x6e\x2f\x7c\xf9\x5e\x6b\x85\xd5\x0d\x48\x68\xed\x17\x88\x1a\xa4\x47\x2b\x68\x1e\x22\x74\xc5\x3f\x0a\xa4\xa5\x63\x71\xec\x17\x49\x41\x17\x05\xc7\xeb\x7d\x2a\xcc\x52\x2f\x87\xdd\x4e\xf8\xde\x88\xf7\xc4\xdf\x6e\xe3\x3d\x54\x40\xe6\x8c\x77\x02\xf5\x95\x40\xe2\x6f\xb9\x5b\xe4\xb1\x79\xe7\x5e\x80\xd6\xe8\x53\x98\xa9\x77\x6e\x01\x98\x31\xde\x04\xec\x5e\x36\xd2\x1a\x7f\x6b\x01\x9c\x49\x97\xc2\x05\x31\x3b\x21\xe1\x6f\xf7\x23\xa2\x62\xa0\x60\x38\x3e\xcc\x40\x4b\x07\x98\x76\x6e\x26\x88\x98\x3f\x85\xd5\x50\xcf\x8f\x9b\xab\xe1\x53\x3c\xa8\x5c\x37\x6c\x23\xd7\x6c\x9b\xbb\x93\xc3\x49\x6e\x5c\xe1\xf1\x70\x32\x23\x9b\xd7\x80\xbd\x87\xb5\x39\x6b\xec\x93\x3c\x94\x2a\x59\x7a\x95\xd9\xee\x61\x65\xe9\x8d\x96\xc3\xdf\x3e\x5e\x64\x4d\xdc\x54\xb1\x45\xd3\x12\xc2\xb7\x8d\x7b\xcb\x87\x26\x2d\x1a\x54\x22\x61\xd5\x5c\x4d\x26\x4f\xc7\xb7\xf5\xcf\x81\x98\xab\x5a\x34\x95\x12\x21\x32\xc2\
x79\x39\x2a\xe4\x3f\x30\x6e\xdb\x8c\xc1\x6a\x09\x2c\x86\x9f\x18\xdc\xcd\x52\x08\x9a\x05\x37\x10\x85\xa3\x51\x74\x1b\x5b\x29\x3a\x69\x31\x85\x4c\xa2\x4a\xc1\xa4\x68\x79\xd0\xfd\xcd\x19\x1d\x7e\x57\xa1\x95\x24\x31\xfc\x96\x0b\x7f\xc8\xf9\x67\xa1\x96\x09\xfa\x12\x32\x14\xfc\xdf\xc9\x1b\xc8\x85\x73\xb0\x46\x41\xa3\x6d\x56\x28\x55\x82\xb1\x29\x92\xf0\x7a\xd6\xa3\xb0\x36\x50\x38\xb4\x0e\x36\x2b\x13\x4b\x2d\xb7\x78\x39\x75\xab\xd2\x0f\xe2\x75\x8e\x74\xb9\x12\x25\x48\x4f\x65\x3d\x1e\xaa\x19\xe9\xf5\xcf\x1e\xfe\x63\x64\xc8\xc0\xfb\x61\x5e\x4d\x85\xed\x38\xe7\xcf\xf4\xd6\x8e\xf0\x38\x14\xb5\x63\x7b\x7b\xd1\xd5\x0e\xe4\xaa\xf4\xb4\xa3\xb5\x59\xc8\xda\x21\xc9\x2b\xfc\xd6\x0e\xc6\x46\xab\xcd\x0b\x8c\xa0\x9a\x81\xdf\x76\xc2\x93\xb5\x8c\xf1\x19\x7e\x6d\xd6\xe4\xfc\x36\x88\x80\x21\x2f\xf6\xc8\x38\xd7\x58\x52\x36\x0f\x36\x6a\x94\xa6\xf0\xe1\xf3\x35\x96\x5f\x0e\x57\xa2\x08\xc7\x06\x5d\x5d\x7a\xaa\xb0\x08\x6b\x0f\x24\x83\x5a\x0b\x39\x19\x9f\x80\xfc\xb1\xc9\x50\x55\x4f\x90\xcf\x9e\x55\x7b\x36\xd7\x3f\xcb\x2f\x55\x84\xd7\x88\xdf\x59\xef\xb7\x34\x8a\x31\x12\x68\x28\x28\xba\x77\xdd\x7f\x06\x00\x00\xff\xff\x5a\x43\x33\xde\x00\x22\x00\x00") func call_tracerJsBytes() ([]byte, error) { return bindataRead( @@ -133,11 +133,11 @@ func call_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "call_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe9, 0xef, 0x68, 0xda, 0xd8, 0x9, 0xf5, 0xd5, 0x71, 0xa8, 0x8a, 0xfb, 0x30, 0xe8, 0xf0, 0x72, 0x14, 0x36, 0x6b, 0x62, 0x5a, 0x4e, 0xff, 0x16, 0xdc, 0xd3, 0x2c, 0x68, 0x7b, 0x79, 0x9f, 0xd3}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4d, 0x39, 0xde, 0xc6, 0x79, 0xff, 0xe3, 0x5d, 0x47, 0xed, 0xbd, 0xf4, 0x21, 0xe8, 0xc9, 0x4, 0xe0, 0xe0, 0xe4, 0x76, 0x88, 0x25, 0x7f, 0x4f, 0x30, 0xfe, 0x30, 0x1f, 0x8c, 0x4d, 0x76, 0x3d}} return a, nil } -var _evmdis_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x57\xdf\x6f\xda\xca\x12\x7e\x86\xbf\x62\x94\x27\x50\x29\x60\x63\x08\x38\x27\x47\xe2\xa6\xf4\x1c\xae\xd2\x24\x02\x72\x8f\x2a\x94\x87\x05\xc6\xb0\xaa\xf1\x5a\xbb\x6b\x72\xb8\x55\xfe\xf7\xab\xd9\x59\x03\xf9\x75\xdb\x4a\xa7\x0f\x3b\xb5\x77\xbe\x6f\xbe\x9d\x19\xcf\x92\x56\x0b\xae\x54\xbe\xd7\x72\xbd\xb1\x10\xb6\x83\x73\x98\x6d\x10\xd6\xea\x23\xda\x0d\x6a\x2c\xb6\x30\x2c\xec\x46\x69\x53\x6d\xb5\x60\xb6\x91\x06\x12\x99\x22\x48\x03\xb9\xd0\x16\x54\x02\xf6\x85\x7f\x2a\x17\x5a\xe8\x7d\xb3\xda\x6a\x31\xe6\xcd\x6d\x62\x48\x34\x22\x18\x95\xd8\x47\xa1\x31\x86\xbd\x2a\x60\x29\x32\xd0\xb8\x92\xc6\x6a\xb9\x28\x2c\x82\xb4\x20\xb2\x55\x4b\x69\xd8\xaa\x95\x4c\xf6\x44\x29\x2d\x14\xd9\x0a\xb5\x0b\x6d\x51\x6f\x4d\xa9\xe3\x8f\x9b\x7b\xb8\x46\x63\x50\xc3\x1f\x98\xa1\x16\x29\xdc\x15\x8b\x54\x2e\xe1\x5a\x2e\x31\x33\x08\xc2\x40\x4e\x6f\xcc\x06\x57\xb0\x70\x74\x04\xfc\x4c\x52\xa6\x5e\x0a\x7c\x56\x45\xb6\x12\x56\xaa\xac\x01\x28\x49\x39\xec\x50\x1b\xa9\x32\xe8\x94\xa1\x3c\x61\x03\x94\x26\x92\x9a\xb0\x74\x00\x0d\x2a\x27\x5c\x1d\x44\xb6\x87\x54\xd8\x23\xf4\x27\x12\x72\x3c\xf7\x0a\x64\xe6\xc2\x6c\x54\x8e\x60\x37\xc2\xd2\xa9\x1f\x65\x9a\xc2\x02\xa1\x30\x98\x14\x69\x83\xd8\x16\x85\x85\xbf\xc6\xb3\x3f\x6f\xef\x67\x30\xbc\xf9\x0a\x7f\x0d\x27\x93\xe1\xcd\xec\xeb\x05\x3c\x4a\xbb\x51\x85\x05\xdc\x21\x53\xc9\x6d\x9e\x4a\x5c\xc1\xa3\xd0\x5a\x64\x76\x0f\x2a\x21\x86\x2f\xa3\xc9\xd5\x9f\xc3\x9b\xd9\xf0\x5f\xe3\xeb\xf1\xec\x2b\x28\x0d\x9f\xc7\xb3\x9b\xd1\x74\x0a\x9f\x6f\x27\x30\x84\xbb\xe1\x64\x36\xbe\xba\xbf\x1e\x4e\xe0\xee\x7e\x72\x77\x3b\x1d\x35\x61\x8a\xa4\x0a\x09\xff\xe3\x9c\x27\xae\x7a\x1a\x61\x85\x56\xc8\xd4\x94\x99\xf8\xaa\x0a\x30\x1b\x55\xa4\x2b\xd8\x88\x1d\x82\xc6\x25\xca\x1d\xae\x40\xc0\x52\xe5\xfb\x9f\x2e\x2a\x71\x89\x54\x65\x6b\x77\xe6\x77\x1b\x12\xc6\x09\x64\xca\x36\xc0\x20\xc2\x6f\x1b\x6b\xf3\xb8\xd5\x7a\x7c\x7c\x6c\xae\xb3\xa2\xa9\xf4\xba\x95\x32\x9d\x69\xfd\xde\xac\x12\x27\xee\xb6\x2b\x69\x66\x5a\x2c\x51\x83\x46\x5b\xe8\xcc\x80\x29\x92\x84\xfc\x2c\xc8\x2c\x51\x7a\xeb\xda\x04\x12\xad\xb6\x20\xc0\x92\x2f\x58\x05\x39\x6a\xda\xf4\x14\x1f\x8d\xdd\xa7\x4e\xe6\x4a\x1a\x61\x0c\x6e\x17\xe9\xbe\x59\xfd\x5e\xad\x18\x2b\x96\xdf\x62\x98\x7f\x57\xb9\x89\x61\xfe\xf0\xf4\xd0\xa8\x56\x2b\x59\x5e\x98\x0d\x9a\x18\xbe\xb7\x63\x68\x37\x20\x88\x21\x68\x40\xe8\xd6\x8e\x5b\x23\xb7\x76\xdd\xda\x73\xeb\xb9\x5b\xfb\x6e\x1d\xb8\x35\x68\xb3\x61\x74\xc0\x6e\x01\xfb\x05\xec\x18\xb0\x67\xc8\x9e\xa1\x8f\xc3\x81\x42\x8e\x14\x72\xa8\x90\x63\x85\xcc\xd2\x61\x97\x88\x59\x22\x66\xe9\x32\x4b\x97\x59\xba\xec\xd2\x65\x96\xae\x17\xdc\x75\xe7\xe9\x32\x4b\xf7\x9c\x9f\x98\xa5\xcb\x2c\x3d\x3e\x72\x8f\x01\x3d\x7f\x44\x06\xf4\x58\x7c\x8f\x01\x3d\x06\xf4\x19\xd0\xe7\xb0\xfd\x90\x9f\x3a\x6c\x98\xa5\xcf\x61\xfb\x3d\x36\x1c\xb6\xcf\x2c\x7d\x66\x19\xb0\xf8\x41\xe0\xf6\x06\x1c\x6f\xc0\xf1\x06\x3e\xab\x65\x5a\x7d\x5e\xdb\x3e\xb1\xed\xd0\xdb\x8e\xb7\x91\xb7\x5d\x6f\x7d\xe6\xdb\x3e\xf5\x6d\x9f\xfb\xb6\xe7\x3b\xd4\xc9\xf3\x05\x9e\x2f\xf0\x7c\x81\xe7\x0b\x3c\x5f\x59\xc9\xb2\x94\x65\x2d\x7d\x31\x03\x5f\xcd\xc0\x97\x33\xf0\xf5\x0c\x7c\x41\x03\x5f\xd1\xc0\x97\x34\xf0\x35\x0d\x42\xcf\x17\xf6\x63\x08\xc9\x0e\x62\xe8\x34\x20\xe8\xb4\x63\x88\xc8\x06\x31\x74\xc9\x86\x31\xf4\xc8\x76\x62\x38\x27\x1b\xc5\xd0\x27\xdb\x8d\x61\x40\x96\xf8\xa8\x6b\x3b\x44\x48\x8c\x1d\x52\x48\x94\x1d\x92\x48\x9c\x11\x69\x24\xd2\x88\x44\x12\x6b\x44\x2a\x89\x36\x22\x99\xc4\x1b\x45\xac\x23\xea\xb2\x8e\xa8\xc7\x3a\xa2\x73\xd6\x41\xdd\xe7\x00\x03\xd6\x41\xfd\x47\x3a\xa8\x01\x49\x87\xeb\x40\xd2\xe1\x7a\x90\x74\xb8\x2e\x24\x4a\xea\x43\xa7\xc3\x75\x22\x91\x52\x2f\x3
a\x1d\xae\x1b\x89\xd6\xf5\x23\xf1\xfa\x8e\x0c\x7a\x81\xb7\xa1\xb7\x1d\x6f\x23\x67\xc3\xc8\x7f\x45\x91\xff\x8c\x22\xff\x1d\x45\x1d\xbf\xef\xfd\xdc\x47\xf0\x44\xdf\x79\xab\x05\x1a\x4d\x91\x5a\x1a\xfe\x32\xdb\xa9\x6f\x34\x9e\x37\x98\x81\x48\x53\x37\xc7\x54\xbe\x54\x2b\x34\x3c\x1f\x17\x88\x19\x48\x8b\x5a\xd0\x05\xa1\x76\xa8\xe9\x6e\x2c\x27\x93\xa3\x23\x4c\x22\x33\x91\x96\xc4\x7e\x86\xd2\x60\x92\xd9\xba\x59\xad\xf0\xfb\x18\x92\x22\x5b\xd2\xe8\xaa\xd5\xe1\xbb\xa7\x00\xbb\x91\xa6\xe9\x46\xd2\xbc\xfd\xd0\x54\xb9\xb9\x80\x52\x67\x22\xde\x92\x49\xd4\x62\x69\x0b\x91\x02\xfe\x8d\xcb\xc2\xcd\x42\x95\x80\xc8\xbc\x72\x48\x78\xe0\x57\x1c\xfe\x24\x6a\xaa\xd6\x0d\x58\x2d\x28\x78\x19\xc2\x58\xcc\x4f\x23\xd0\xb5\x81\x3b\xd4\xfb\x92\xcb\x5d\x83\x14\xf2\x3f\x5f\x7c\x38\x24\x6a\xc2\xbd\xc9\x5c\xad\x54\x76\x42\x43\xa2\xc5\x16\xe1\xf2\xf4\x74\xc7\xff\x36\x53\xcc\xd6\x76\x03\x1f\x21\x78\xb8\xa8\x7a\x04\x6a\xad\x34\x5c\x42\xaa\xd6\xcd\x35\xda\x11\x3d\xd6\xea\x17\xd5\x4a\x45\x26\x50\x73\xbb\x4c\x5f\x71\xdc\xf3\x33\xf7\xea\xec\x01\x2e\x19\x4a\x9e\x4f\x80\xa9\x41\x20\x80\xa7\xf9\x84\xb9\xdd\xd4\xea\x70\x79\x2a\xc5\xc7\xf7\x74\x2a\xa7\x4b\x05\x2e\xf9\xa9\xa2\xf2\x18\xe8\x1f\x11\xa8\xbc\x69\xd5\x4d\xb1\x5d\xa0\xae\xd5\x1b\x6e\x7b\x45\x84\x10\xc3\x73\x7e\xde\x2b\xcb\x3c\x7f\x70\xcf\x4f\x24\xc9\xa9\x77\x8a\xa9\xb6\xe5\xc9\x7f\x87\xb6\x8f\xee\xce\x9e\x6b\xdc\xa9\x1c\x2e\xe1\xe0\x38\x7f\x05\xe1\x64\x11\x22\x51\xba\x46\x28\x09\x97\xd0\xbe\x00\x09\xbf\xf1\xd9\xfc\x0d\x36\x67\xb6\xa6\xca\x1f\x2e\x40\x7e\xf8\x50\x77\xa0\x8a\x7f\xcb\x1a\x9b\xe4\xea\x72\xc4\x09\xc9\x11\xbf\xd5\x64\xbd\x69\xd5\xd4\x6a\x99\xad\x6b\x41\xaf\xee\x72\x5f\x79\xa2\xc5\x3c\x4a\xbb\x64\x7f\x97\x12\xef\x54\xf7\x67\x58\x0a\x83\x70\x76\x35\xbc\xbe\x3e\x8b\xe1\xf8\x70\x75\xfb\x69\x74\x16\x1f\x0e\x29\x33\x63\xe9\xe7\x2b\x97\xf8\x24\x6e\xa7\xde\xdc\x89\xb4\xc0\xdb\x84\xeb\x7d\x70\x97\xff\xc5\xd7\xde\xd1\x2b\x6f\x2e\xe0\xfc\x6c\x2d\x8c\x6b\x87\x17\x80\xf6\xbb\x00\xab\xde\xf2\x0f\x9e\xa7\xe1\x39\xc4\x31\xbd\x85\x0a\x4f\x50\x2f\x30\x32\xcb\x0b\x7b\xc0\x6c\x71\xab\xf4\xbe\x69\xe8\x87\x4f\xcd\xe7\xa4\x71\x48\xce\x07\x7f\xee\x17\x14\xc7\x5e\xcf\x8a\x34\x7d\xbe\xc7\x73\xe4\x9d\x4d\x95\x73\x4e\xe6\xbe\x77\x4e\x3e\x02\xd7\x02\xec\xe7\xa3\x2d\x34\x8a\x6f\x17\xc7\x8a\x7e\x1a\x5d\x8f\xfe\x18\xce\x46\xcf\x2a\x3b\x9d\x0d\x67\xe3\x2b\x7e\xf5\xe3\xda\x86\xbf\x54\xdb\xd7\x9d\x70\x3c\x87\x3b\x06\xbc\x6a\xc1\xb7\x5b\xe0\x97\x7b\xe0\x97\x9a\xe0\x58\xd0\x7f\xa2\xa2\xff\xbf\xa4\xff\x74\x4d\x27\xa3\xd9\xfd\xe4\xe6\xa4\x74\xf4\xe7\xca\x4f\x7c\x33\xde\xf5\xed\xba\x05\xaf\xdc\x79\x7c\xf9\x2b\xee\x8d\xc6\x57\x85\x6d\xb8\xd0\x1f\x4a\xd6\x77\xf4\x4e\x67\xb7\x77\xc7\xde\xbb\x1f\x5f\x8d\x0f\x43\xe5\x47\x31\xda\x0d\x68\xbf\xc3\xfa\xef\xfb\x2f\x77\x9f\x46\xd3\x99\x67\x2a\x33\x9b\x2f\x0f\x9f\xe9\x1a\xed\xdd\x55\xed\x64\x06\xca\xa4\x9c\x7f\xd2\xdc\x51\x9a\xcb\xe9\x77\x40\xa7\x98\x1d\xe0\xcf\x6e\x0e\xf8\x08\xed\xbf\xbb\x78\xe4\x3a\x0e\xf7\x97\x05\xf3\x37\x98\x23\x3e\xd6\xf5\xd9\x45\x7a\x3c\xdd\xf3\x3b\x88\xf1\xd5\xca\x53\xf5\xa9\xfa\xbf\x00\x00\x00\xff\xff\x51\x4b\xdc\x7e\x62\x10\x00\x00") +var _evmdis_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x57\xdf\x6f\xda\xca\x12\x7e\x86\xbf\x62\x94\x27\x50\x29\x60\x63\x08\x38\x27\x47\xe2\xa6\xf4\x1c\xae\xd2\x24\x02\x72\x8f\x2a\x94\x87\x05\xc6\xb0\xaa\xf1\x5a\xbb\x6b\x72\xb8\x55\xfe\xf7\xab\xd9\x59\x03\xf9\x75\xdb\x4a\xa7\x0f\x3b\xb5\x77\xbe\x6f\xbe\x9d\x19\xcf\x92\x56\x0b\xae\x54\xbe\xd7\x72\xbd\xb1\x10\xb6\x83\x73\x98\x6d\x10\xd6\xea\x23\xda\x0d\x6a\x2c\xb6\x30\x2c\xec\x46\x69\x53\x6d\xb5\x60\xb6\x91\x06\x12\x99\x22\x48\x03\xb9\xd0\x16\x54\x02\xf6\x85\x7f\x2a\x17\x5a\xe8\x7d\xb3\xda\x6a\x31\xe6\xcd\x6d\x62\x48\x34\x22\x18\x95\xd8\x47\xa1\x31\x86\xbd\x2a\x60\x29\x32\xd0\xb8\x92\xc6\x6a\xb9\x28\x2c\x82\xb4\x20\xb2\x55\x4b\x69\xd8\xaa\x95\x4c\xf6\x44\x29\x2d\x14\xd9\x0a\xb5\x0b\x6d\x51\x6f\x4d\xa9\xe3\x8f\x9b\x7b\xb8\x46\x63\x50\xc3\x1f\x98\xa1\x16\x29\xdc\x15\x8b\x54\x2e\xe1\x5a\x2e\x31\x33\x08\xc2\x40\x4e\x6f\xcc\x06\x57\xb0\x70\x74\x04\xfc\x4c\x52\xa6\x5e\x0a\x7c\x56\x45\xb6\x12\x56\xaa\xac\x01\x28\x49\x39\xec\x50\x1b\xa9\x32\xe8\x94\xa1\x3c\x61\x03\x94\x26\x92\x9a\xb0\x74\x00\x0d\x2a\x27\x5c\x1d\x44\xb6\x87\x54\xd8\x23\xf4\x27\x12\x72\x3c\xf7\x0a\x64\xe6\xc2\x6c\x54\x8e\x60\x37\xc2\xd2\xa9\x1f\x65\x9a\xc2\x02\xa1\x30\x98\x14\x69\x83\xd8\x16\x85\x85\xbf\xc6\xb3\x3f\x6f\xef\x67\x30\xbc\xf9\x0a\x7f\x0d\x27\x93\xe1\xcd\xec\xeb\x05\x3c\x4a\xbb\x51\x85\x05\xdc\x21\x53\xc9\x6d\x9e\x4a\x5c\xc1\xa3\xd0\x5a\x64\x76\x0f\x2a\x21\x86\x2f\xa3\xc9\xd5\x9f\xc3\x9b\xd9\xf0\x5f\xe3\xeb\xf1\xec\x2b\x28\x0d\x9f\xc7\xb3\x9b\xd1\x74\x0a\x9f\x6f\x27\x30\x84\xbb\xe1\x64\x36\xbe\xba\xbf\x1e\x4e\xe0\xee\x7e\x72\x77\x3b\x1d\x35\x61\x8a\xa4\x0a\x09\xff\xe3\x9c\x27\xae\x7a\x1a\x61\x85\x56\xc8\xd4\x94\x99\xf8\xaa\x0a\x30\x1b\x55\xa4\x2b\xd8\x88\x1d\x82\xc6\x25\xca\x1d\xae\x40\xc0\x52\xe5\xfb\x9f\x2e\x2a\x71\x89\x54\x65\x6b\x77\xe6\x77\x1b\x12\xc6\x09\x64\xca\x36\xc0\x20\xc2\x6f\x1b\x6b\xf3\xb8\xd5\x7a\x7c\x7c\x6c\xae\xb3\xa2\xa9\xf4\xba\x95\x32\x9d\x69\xfd\xde\xac\x12\x27\xee\xb6\x2b\x69\x66\x5a\x2c\x51\x83\x46\x5b\xe8\xcc\x80\x29\x92\x44\x2e\x25\x66\x16\x64\x96\x28\xbd\x75\x7d\x02\x89\x56\x5b\x10\x60\xc9\x19\xac\x82\x1c\x35\x6d\x7a\x8e\x8f\xc6\xee\x53\xa7\x73\x25\x8d\x30\x06\xb7\x8b\x74\xdf\xac\x7e\xaf\x56\x8c\x15\xcb\x6f\x31\xcc\xbf\xab\xdc\xc4\x30\x7f\x78\x7a\x68\x54\xab\x95\x2c\x2f\xcc\x06\x4d\x0c\xdf\xdb\x31\xb4\x1b\x10\xc4\x10\x34\x20\x74\x6b\xc7\xad\x91\x5b\xbb\x6e\xed\xb9\xf5\xdc\xad\x7d\xb7\x0e\xdc\x1a\xb4\xd9\x30\x3a\x60\xb7\x80\xfd\x02\x76\x0c\xd8\x33\x64\xcf\xd0\xc7\xe1\x40\x21\x47\x0a\x39\x54\xc8\xb1\x42\x66\xe9\xb0\x4b\xc4\x2c\x11\xb3\x74\x99\xa5\xcb\x2c\x5d\x76\xe9\x32\x4b\xd7\x0b\xee\xba\xf3\x74\x99\xa5\x7b\xce\x4f\xcc\xd2\x65\x96\x1e\x1f\xb9\xc7\x80\x9e\x3f\x22\x03\x7a\x2c\xbe\xc7\x80\x1e\x03\xfa\x0c\xe8\x73\xd8\x7e\xc8\x4f\x1d\x36\xcc\xd2\xe7\xb0\xfd\x1e\x1b\x0e\xdb\x67\x96\x3e\xb3\x0c\x58\xfc\x20\x70\x7b\x03\x8e\x37\xe0\x78\x03\x9f\xd5\x32\xad\x3e\xaf\x6d\x9f\xd8\x76\xe8\x6d\xc7\xdb\xc8\xdb\xae\xb7\x3e\xf3\x6d\x9f\xfa\xb6\xcf\x7d\xdb\xf3\x1d\xea\xe4\xf9\x02\xcf\x17\x78\xbe\xc0\xf3\x05\x9e\xaf\xac\x64\x59\xca\xb2\x96\xbe\x98\x81\xaf\x66\xe0\xcb\x19\xf8\x7a\x06\xbe\xa0\x81\xaf\x68\xe0\x4b\x1a\xf8\x9a\x06\xa1\xe7\x0b\xfb\x31\x84\x64\x07\x31\x74\x1a\x10\x74\xda\x31\x44\x64\x83\x18\xba\x64\xc3\x18\x7a\x64\x3b\x31\x9c\x93\x8d\x62\xe8\x93\xed\xc6\x30\x20\x4b\x7c\xd4\xb5\x1d\x22\x24\xc6\x0e\x29\x24\xca\x0e\x49\x24\xce\x88\x34\x12\x69\x44\x22\x89\x35\x22\x95\x44\x1b\x91\x4c\xe2\x8d\x22\xd6\x11\x75\x59\x47\xd4\x63\x1d\xd1\x39\xeb\xa0\xee\x73\x80\x01\xeb\xa0\xfe\x23\x1d\xd4\x80\xa4\xc3\x75\x20\xe9\x70\x3d\x48\x3a\x5c\x17\x12\x25\xf5\xa1\xd3\xe1\x3a\x91\x48\xa
9\x17\x9d\x0e\xd7\x8d\x44\xeb\xfa\x91\x78\x7d\x47\x06\xbd\xc0\xdb\xd0\xdb\x8e\xb7\x91\xb3\x61\xe4\xbf\xa2\xc8\x7f\x46\x91\xff\x8e\xa2\x8e\xdf\xf7\x7e\xee\x23\x78\xa2\xef\xbc\xd5\x02\x8d\xa6\x48\x2d\x4d\x7f\x99\xed\xd4\x37\x9a\xcf\x1b\xcc\x40\xa4\xa9\x1b\x64\x2a\x5f\xaa\x15\x1a\x1e\x90\x0b\xc4\x0c\xa4\x45\x2d\xe8\x86\x50\x3b\xd4\x74\x39\x96\xa3\xc9\xd1\x11\x26\x91\x99\x48\x4b\x62\x3f\x44\x69\x30\xc9\x6c\xdd\xac\x56\xf8\x7d\x0c\x49\x91\x2d\x69\x74\xd5\xea\xf0\xdd\x53\x80\xdd\x48\xd3\x74\x23\x69\xde\x7e\x68\xaa\xdc\x5c\x40\xa9\x33\x11\x6f\xc9\x24\x6a\xb1\xb4\x85\x48\x01\xff\xc6\x65\xe1\x66\xa1\x4a\x40\x64\x5e\x39\x24\x3c\xf1\x2b\x0e\x7f\x12\x35\x55\xeb\x06\xac\x16\x14\xbc\x0c\x61\x2c\xe6\xa7\x11\xe8\xde\xc0\x1d\xea\x7d\xc9\xe5\xee\x41\x0a\xf9\x9f\x2f\x3e\x1c\x12\x35\xe1\xde\x64\xae\x56\x2a\x3b\xa1\x21\xd1\x62\x8b\x70\x79\x7a\xba\xe3\x7f\x9b\x29\x66\x6b\xbb\x81\x8f\x10\x3c\x5c\x54\x3d\x02\xb5\x56\x1a\x2e\x21\x55\xeb\xe6\x1a\xed\x88\x1e\x6b\xf5\x8b\x6a\xa5\x22\x13\xa8\xb9\x5d\xa6\xaf\x38\xee\xf9\x99\x7b\x75\xf6\x00\x97\x0c\x25\xcf\x27\xc0\xd4\x20\x10\xc0\xd3\x7c\xc2\xdc\x6e\x6a\x75\xb8\x3c\x95\xe2\xe3\x7b\x3a\x95\xd3\xa5\x02\x97\xfc\x54\x51\x79\x0c\xf4\x8f\x08\x54\xde\xb4\xea\xa6\xd8\x2e\x50\xd7\xea\x0d\xb7\xbd\x22\x42\x88\xe1\x39\x3f\xef\x95\x65\x9e\x3f\xb8\xe7\x27\x92\xe4\xd4\x3b\xc5\x54\xdb\xf2\xe4\xbf\x43\xdb\x47\x77\x67\xcf\x35\xee\x54\x0e\x97\x70\x70\x9c\xbf\x82\x70\xb2\x08\x91\x28\x5d\x23\x94\x84\x4b\x68\x5f\x80\x84\xdf\xf8\x6c\xfe\x06\x9b\x33\x5b\x53\xe5\x0f\x17\x20\x3f\x7c\xa8\x3b\x50\xc5\xbf\x65\x8d\x4d\x72\x75\x39\xe2\x84\xe4\x88\xdf\x6a\xb2\xde\xb4\x6a\x6a\xb5\xcc\xd6\xb5\xa0\x57\x77\xb9\xaf\x3c\xd1\x62\x1e\xa5\x5d\xb2\xbf\x4b\x89\x77\xaa\xfb\x33\x2c\x85\x41\x38\xbb\x1a\x5e\x5f\x9f\xc5\x70\x7c\xb8\xba\xfd\x34\x3a\x8b\x0f\x87\x94\x99\xb1\xf4\xfb\x95\x4b\x7c\x12\xb7\x53\x6f\xee\x44\x5a\xe0\x6d\xc2\xf5\x3e\xb8\xcb\xff\xe2\x6b\xef\xe8\x95\x37\x17\x70\x7e\xb6\x16\xc6\xb5\xc3\x0b\x40\xfb\x5d\x80\x55\x6f\xf9\x07\xcf\xd3\xf0\x1c\xe2\x98\xde\x42\x85\x27\xa8\x17\x18\x99\xe5\x85\x3d\x60\xb6\xb8\x55\x7a\xdf\x34\xf4\xcb\xa7\xe6\x73\xd2\x38\x24\xe7\x83\x3f\xf7\x0b\x8a\x63\xaf\x67\x45\x9a\x3e\xdf\xe3\x39\xf2\xce\xa6\xca\x39\x27\x73\xdf\x3b\x27\x1f\x81\x6b\x01\xf6\xf3\xd1\x16\x1a\xc5\xb7\x8b\x63\x45\x3f\x8d\xae\x47\x7f\x0c\x67\xa3\x67\x95\x9d\xce\x86\xb3\xf1\x15\xbf\xfa\x71\x6d\xc3\x5f\xaa\xed\xeb\x4e\x38\x9e\xc3\x1d\x03\x5e\xb5\xe0\xdb\x2d\xf0\xcb\x3d\xf0\x4b\x4d\x70\x2c\xe8\x3f\x51\xd1\xff\x5f\xd2\x7f\xba\xa6\x93\xd1\xec\x7e\x72\x73\x52\x3a\xfa\x7b\xe5\x27\xbe\x19\xef\xfa\x76\xdd\x82\x57\xee\x3c\xbe\xfc\x15\xf7\x46\xe3\xab\xc2\x36\x5c\xe8\x0f\x25\xeb\x3b\x7a\xa7\xb3\xdb\xbb\x63\xef\xdd\x8f\xaf\xc6\x87\xa1\xf2\xa3\x18\xed\x06\xb4\xdf\x61\xfd\xf7\xfd\x97\xbb\x4f\xa3\xe9\xcc\x33\x95\x99\xcd\x97\x87\xcf\x74\x8d\xf6\xee\xaa\x76\x32\x03\x65\x52\xce\x3f\x69\xee\x28\xcd\xe5\xf4\x3b\xa0\x53\xcc\x0e\xf0\x67\x37\x07\x7c\x84\xf6\xdf\x5d\x3c\x72\x1d\x87\xfb\xcb\x82\xf9\x1b\xcc\x11\x1f\xeb\xfa\xec\x22\x3d\x9e\xee\xf9\x1d\xc4\xf8\x6a\xe5\xa9\xfa\x54\xfd\x5f\x00\x00\x00\xff\xff\xdf\x2f\xd9\xfa\x63\x10\x00\x00") func evmdis_tracerJsBytes() ([]byte, error) { return bindataRead( @@ -153,7 +153,7 @@ func evmdis_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "evmdis_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd5, 0xe8, 0x96, 0xa1, 0x8b, 0xc, 0x68, 0x3c, 0xe8, 0x5d, 0x7e, 0xf0, 0xab, 0xfe, 0xec, 0xd1, 0xb, 0x3d, 0xfc, 0xc7, 0xac, 0xb5, 0xa, 0x41, 0x55, 0x0, 0x3a, 0x60, 0xa7, 0x8e, 0x46, 0x93}} + a := &asset{bytes: bytes, 
info: info, digest: [32]uint8{0xb5, 0xc8, 0x73, 0x8e, 0xfb, 0x1f, 0x84, 0x7d, 0x37, 0xd9, 0x26, 0x24, 0x37, 0xb8, 0x65, 0xb1, 0xed, 0xa0, 0x76, 0x9a, 0xf0, 0x8e, 0x3a, 0x9b, 0x20, 0x93, 0x27, 0x26, 0x2e, 0xc9, 0x9b, 0xde}} return a, nil } diff --git a/eth/tracers/internal/tracers/call_tracer.js b/eth/tracers/internal/tracers/call_tracer.js index f8b383cd96..352c309b49 100644 --- a/eth/tracers/internal/tracers/call_tracer.js +++ b/eth/tracers/internal/tracers/call_tracer.js @@ -132,13 +132,12 @@ // If the call was a contract call, retrieve the gas usage and output if (call.gas !== undefined) { call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost + call.gas - log.getGas()).toString(16); - - var ret = log.stack.peek(0); - if (!ret.equals(0)) { - call.output = toHex(log.memory.slice(call.outOff, call.outOff + call.outLen)); - } else if (call.error === undefined) { - call.error = "internal failure"; // TODO(karalabe): surface these faults somehow - } + } + var ret = log.stack.peek(0); + if (!ret.equals(0)) { + call.output = toHex(log.memory.slice(call.outOff, call.outOff + call.outLen)); + } else if (call.error === undefined) { + call.error = "internal failure"; // TODO(karalabe): surface these faults somehow } delete call.gasIn; delete call.gasCost; delete call.outOff; delete call.outLen; @@ -208,7 +207,7 @@ } else if (ctx.error !== undefined) { result.error = ctx.error; } - if (result.error !== undefined) { + if (result.error !== undefined && (result.error !== "execution reverted" || result.output ==="0x")) { delete result.output; } return this.finalize(result); diff --git a/eth/tracers/testdata/call_tracer_inner_instafail.json b/eth/tracers/testdata/call_tracer_inner_instafail.json new file mode 100644 index 0000000000..86070d1308 --- /dev/null +++ b/eth/tracers/testdata/call_tracer_inner_instafail.json @@ -0,0 +1,72 @@ +{ + "genesis": { + "difficulty": "117067574", + "extraData": "0xd783010502846765746887676f312e372e33856c696e7578", + "gasLimit": "4712380", + "hash": "0xe05db05eeb3f288041ecb10a787df121c0ed69499355716e17c307de313a4486", + "miner": "0x0c062b329265c965deef1eede55183b3acb8f611", + "mixHash": "0xb669ae39118a53d2c65fd3b1e1d3850dd3f8c6842030698ed846a2762d68b61d", + "nonce": "0x2b469722b8e28c45", + "number": "24973", + "stateRoot": "0x532a5c3f75453a696428db078e32ae283c85cb97e4d8560dbdf022adac6df369", + "timestamp": "1479891145", + "totalDifficulty": "1892250259406", + "alloc": { + "0x6c06b16512b332e6cd8293a2974872674716ce18": { + "balance": "0x0", + "nonce": "1", + "code": "0x60606040526000357c0100000000000000000000000000000000000000000000000000000000900480632e1a7d4d146036575b6000565b34600057604e60048080359060200190919050506050565b005b3373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051809050600060405180830381858888f19350505050505b5056", + "storage": {} + }, + "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31": { + "balance": "0x229ebbb36c3e0f20", + "nonce": "3", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 3, + "homesteadBlock": 0, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "byzantiumBlock": 1700000, + "constantinopleBlock": 4230000, + "petersburgBlock": 4939394, + "istanbulBlock": 6485846, + "muirGlacierBlock": 7117117, + "ethash": {} + } + }, + "context": { + "number": "24974", + "difficulty": "117067574", + "timestamp": "1479891162", + "gasLimit": "4712388", + "miner": 
"0xc822ef32e6d26e170b70cf761e204c1806265914" + }, + "input": "0xf889038504a81557008301f97e946c06b16512b332e6cd8293a2974872674716ce1880a42e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b1600002aa0e2a6558040c5d72bc59f2fb62a38993a314c849cd22fb393018d2c5af3112095a01bdb6d7ba32263ccc2ecc880d38c49d9f0c5a72d8b7908e3122b31356d349745", + "result": { + "type": "CALL", + "from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31", + "to": "0x6c06b16512b332e6cd8293a2974872674716ce18", + "value": "0x0", + "gas": "0x1a466", + "gasUsed": "0x1dc6", + "input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000", + "output": "0x", + "calls": [ + { + "type": "CALL", + "from": "0x6c06b16512b332e6cd8293a2974872674716ce18", + "to": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31", + "value": "0x14d1120d7b160000", + "error":"internal failure", + "input": "0x" + } + ] + } +} diff --git a/eth/tracers/testdata/call_tracer_revert_reason.json b/eth/tracers/testdata/call_tracer_revert_reason.json new file mode 100644 index 0000000000..094b044677 --- /dev/null +++ b/eth/tracers/testdata/call_tracer_revert_reason.json @@ -0,0 +1,64 @@ +{ + "context": { + "difficulty": "2", + "gasLimit": "8000000", + "miner": "0x0000000000000000000000000000000000000000", + "number": "3212651", + "timestamp": "1597246515" + }, + "genesis": { + "alloc": { + "0xf58833cf0c791881b494eb79d461e08a1f043f52": { + "balance": "0x0", + "code": "0x608060405234801561001057600080fd5b50600436106100a5576000357c010000000000000000000000000000000000000000000000000000000090048063609ff1bd11610078578063609ff1bd146101af5780639e7b8d61146101cd578063a3ec138d14610211578063e2ba53f0146102ae576100a5565b80630121b93f146100aa578063013cf08b146100d85780632e4176cf146101215780635c19a95c1461016b575b600080fd5b6100d6600480360360208110156100c057600080fd5b81019080803590602001909291905050506102cc565b005b610104600480360360208110156100ee57600080fd5b8101908080359060200190929190505050610469565b604051808381526020018281526020019250505060405180910390f35b61012961049a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6101ad6004803603602081101561018157600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506104bf565b005b6101b76108db565b6040518082815260200191505060405180910390f35b61020f600480360360208110156101e357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610952565b005b6102536004803603602081101561022757600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610b53565b60405180858152602001841515151581526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200194505050505060405180910390f35b6102b6610bb0565b6040518082815260200191505060405180910390f35b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020905060008160000154141561038a576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f486173206e6f20726967687420746f20766f746500000000000000000000000081525060200191505060405180910390fd5b8060010160009054906101000a900460ff161561040f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600e8152602001807f416c726561647920766f7465642e00000000000000000000000000000000000081525060200191505060405180910390fd5b60018160010160006101000a81548160ff0
2191690831515021790555081816002018190555080600001546002838154811061044757fe5b9060005260206000209060020201600101600082825401925050819055505050565b6002818154811061047657fe5b90600052602060002090600202016000915090508060000154908060010154905082565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff1615610587576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f596f7520616c726561647920766f7465642e000000000000000000000000000081525060200191505060405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415610629576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601e8152602001807f53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e000081525060200191505060405180910390fd5b5b600073ffffffffffffffffffffffffffffffffffffffff16600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16146107cc57600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1691503373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614156107c7576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260198152602001807f466f756e64206c6f6f7020696e2064656c65676174696f6e2e0000000000000081525060200191505060405180910390fd5b61062a565b60018160010160006101000a81548160ff021916908315150217905550818160010160016101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff16156108bf578160000154600282600201548154811061089c57fe5b9060005260206000209060020201600101600082825401925050819055506108d6565b816000015481600001600082825401925050819055505b505050565b6000806000905060008090505b60028054905081101561094d57816002828154811061090357fe5b9060005260206000209060020201600101541115610940576002818154811061092857fe5b90600052602060002090600202016001015491508092505b80806001019150506108e8565b505090565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146109f7576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526028815260200180610bde6028913960400191505060405180910390fd5b600160008273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160009054906101000a900460ff1615610aba576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260188152602001807f54686520766f74657220616c726561647920766f7465642e000000000000000081525060200191505060405180910390fd5b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000015414610b0957600080fd5b60018060008373ffff
ffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000018190555050565b60016020528060005260406000206000915090508060000154908060010160009054906101000a900460ff16908060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff16908060020154905084565b60006002610bbc6108db565b81548110610bc657fe5b90600052602060002090600202016000015490509056fe4f6e6c79206368616972706572736f6e2063616e206769766520726967687420746f20766f74652ea26469706673582212201d282819f8f06fed792100d60a8b08809b081a34a1ecd225e83a4b41122165ed64736f6c63430006060033", + "nonce": "1", + "storage": { + "0x6200beec95762de01ce05f2a0e58ce3299dbb53c68c9f3254a242121223cdf58": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1": { + "balance": "0x57af9d6b3df812900", + "code": "0x", + "nonce": "6", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "IstanbulBlock":1561651, + "chainId": 5, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf888068449504f80832dc6c094f58833cf0c791881b494eb79d461e08a1f043f5280a45c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf12da0264664db3e71fae1dbdaf2f53954be149ad3b7ba8a5054b4d89c70febfacc8b1a0212e8398757963f419681839ae8c5a54b411e252473c82d93dda68405ca63294", + "result": { + "error": "execution reverted", + "from": "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", + "gas": "0x2d7308", + "gasUsed": "0x588", + "input": "0x5c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", + "to": "0xf58833cf0c791881b494eb79d461e08a1f043f52", + "type": "CALL", + "value": "0x0", + "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e0000" + } +} diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index cd625be0fb..18f8eb12aa 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -269,9 +269,31 @@ func TestCallTracer(t *testing.T) { t.Fatalf("failed to unmarshal trace result: %v", err) } - if !reflect.DeepEqual(ret, test.Result) { + if !jsonEqual(ret, test.Result) { + // uncomment this for easier debugging + //have, _ := json.MarshalIndent(ret, "", " ") + //want, _ := json.MarshalIndent(test.Result, "", " ") + //t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", string(have), string(want)) t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result) } }) } } + +// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to +// comparison +func jsonEqual(x, y interface{}) bool { + xTrace := new(callTrace) + yTrace := new(callTrace) + if xj, err := 
json.Marshal(x); err == nil { + json.Unmarshal(xj, xTrace) + } else { + return false + } + if yj, err := json.Marshal(y); err == nil { + json.Unmarshal(yj, yTrace) + } else { + return false + } + return reflect.DeepEqual(xTrace, yTrace) +} From a6daef32015ebf4bbbfcd43fe428f2bc0d53205b Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 27 Aug 2020 07:16:46 -0500 Subject: [PATCH 026/105] go mod tidy Signed-off-by: meows --- go.mod | 1 - go.sum | 49 +++++++++++++++++++------------------------------ 2 files changed, 19 insertions(+), 31 deletions(-) diff --git a/go.mod b/go.mod index a1e7a800dd..92ddfbb6e5 100755 --- a/go.mod +++ b/go.mod @@ -73,5 +73,4 @@ require ( gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 gopkg.in/urfave/cli.v1 v1.20.0 gotest.tools v2.2.0+incompatible // indirect - storj.io/uplink v1.1.2 ) diff --git a/go.sum b/go.sum index 79af999fea..4de62984bd 100755 --- a/go.sum +++ b/go.sum @@ -57,21 +57,15 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 h1:Eey/GGQ/E5Xp1P2Lyx1qj007hLZfbi0+CoVeJruGCtI= -github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v1.0.1 h1:GKOz8BnRjYrb/JTKgaOk+zh26NWNdSNvdvv0xoAZMSA= -github.com/btcsuite/btcutil v1.0.1/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/calebcase/tmpfile v1.0.1 h1:vD8FSrbsbexhep39/6mvtbIHS3GzIRqiprDNCF6QqSk= -github.com/calebcase/tmpfile v1.0.1/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -158,6 +152,7 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 
h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -165,9 +160,9 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277 h1:E0whKxgp2ojts0FDgUA8dl62bmH0LxKanMoBr6MDTDM= @@ -204,6 +199,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFEB7inlalqfNqw65aNkM1lGX2yt3NmbS8= github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -218,6 +214,7 @@ github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22 github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -227,10 +224,14 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 
h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= @@ -249,7 +250,6 @@ github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -267,6 +267,7 @@ github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hz github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -274,12 +275,12 @@ github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c h1:1RHs3t github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod 
h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -293,6 +294,7 @@ github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssy github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -320,13 +322,11 @@ github.com/shirou/gopsutil v2.20.5+incompatible h1:tYH07UPoQt0OCQdgWWMgYHy3/a9bc github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= -github.com/spacemonkeygo/monkit/v3 v3.0.5/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= -github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4= -github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= @@ -351,10 +351,10 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk= 
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/tidwall/gjson v1.3.5 h1:2oW9FBNu8qt9jy5URgrzsVx/T/KSn3qn/smJQ0crlDQ= @@ -368,16 +368,10 @@ github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZ github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc= github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk= github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/zeebo/admission/v3 v3.0.1/go.mod h1:BP3isIv9qa2A7ugEratNq1dnl2oZRXaQUGdU7WXKtbw= -github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= -github.com/zeebo/float16 v0.1.0/go.mod h1:fssGvvXu+XS8MH57cKmyrLB/cqioYeYX/2mXCN3a5wo= -github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54/go.mod h1:EI8LcOBDlSL3POyqwC1eJhOYlMBMidES+613EtmmT5w= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -390,8 +384,6 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -429,9 +421,7 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net 
v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -462,8 +452,6 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107144601-ef85f5a75ddf/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -496,6 +484,7 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -528,6 +517,7 @@ google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyz google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -538,6 +528,7 @@ gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHN gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod 
h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= @@ -547,12 +538,10 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -storj.io/common v0.0.0-20200611114417-9a3d012fdb62/go.mod h1:6S6Ub92/BB+ofU7hbyPcm96b4Q1ayyN0HLog+3u+wGc= -storj.io/drpc v0.0.12/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA= -storj.io/uplink v1.1.2/go.mod h1:UkdYN/dfSgv+d8fBUoZTrX2oLdj9gzX6Q7tp3CojgKA= From 154b902f91d19f1c5f6accf970b98d6231f8021c Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 27 Aug 2020 07:17:18 -0500 Subject: [PATCH 027/105] rawdb: fix import path FullImmutabilityThreshold Signed-off-by: meows --- core/rawdb/freezer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 5c4e9faf59..68a598bc17 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -106,7 +106,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) { } // Open all the supported data tables freezer := &freezer{ - threshold: params.FullImmutabilityThreshold, + threshold: vars.FullImmutabilityThreshold, tables: make(map[string]*freezerTable), instanceLock: lock, trigger: make(chan chan struct{}), From 3a9f942245cb22948bb671cf1a5ca73d8e195bde Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 27 Aug 2020 07:16:16 -0500 Subject: [PATCH 028/105] core,rawdb,miner,tracers: remove unused imports Signed-off-by: meows --- core/block_validator.go | 1 - core/genesis.go | 3 +-- core/rawdb/freezer.go | 1 - eth/tracers/tracers_test.go | 1 - miner/worker.go | 1 - 5 files changed, 1 insertion(+), 6 deletions(-) diff --git a/core/block_validator.go b/core/block_validator.go index 64bf928690..8c3773695a 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -24,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params/types/ctypes" "github.com/ethereum/go-ethereum/params/vars" - "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" ) diff --git a/core/genesis.go b/core/genesis.go index f422e57008..5472236bb3 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -27,12 +27,11 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/params/confp" "github.com/ethereum/go-ethereum/params/types/ctypes" 
"github.com/ethereum/go-ethereum/params/types/genesisT" "github.com/ethereum/go-ethereum/params/vars" + "github.com/ethereum/go-ethereum/trie" ) // SetupGenesisBlock writes or updates the genesis block in db. diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 68a598bc17..c8f2691807 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -30,7 +30,6 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/params/vars" "github.com/prometheus/tsdb/fileutil" ) diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index 291c948bd7..e38f4cdc4d 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -39,7 +39,6 @@ import ( "github.com/ethereum/go-ethereum/params/types/genesisT" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/tests" - "github.com/go-test/deep" ) // To generate a new callTracer test, copy paste the makeTest method below into diff --git a/miner/worker.go b/miner/worker.go index 4eada98fae..99403f531d 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -35,7 +35,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params/types/ctypes" "github.com/ethereum/go-ethereum/params/vars" - "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" ) From 64404d9c1a6817cbcb4ee7bef6e4faa17a2234bd Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 27 Aug 2020 07:20:11 -0500 Subject: [PATCH 029/105] graphql: fix no method ChainId Signed-off-by: meows --- graphql/graphql.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphql/graphql.go b/graphql/graphql.go index 559da8aaaa..ad0e4453d4 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -1045,7 +1045,7 @@ func (r *Resolver) ProtocolVersion(ctx context.Context) (int32, error) { } func (r *Resolver) ChainID(ctx context.Context) (hexutil.Big, error) { - return hexutil.Big(*r.backend.ChainConfig().ChainID), nil + return hexutil.Big(*r.backend.ChainConfig().GetChainID()), nil } // SyncState represents the synchronisation status returned from the `syncing` accessor. From a5f7028c8fb4385fff90eefb1aaf4df1a0e46587 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 27 Aug 2020 07:20:58 -0500 Subject: [PATCH 030/105] backends: fix TxGas not declared by package params Signed-off-by: meows --- accounts/abi/bind/backends/simulated_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index 4e5b145b8b..a7cb7e44ce 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -158,7 +158,7 @@ func TestNewSimulatedBackend_AdjustTimeFail(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) // Create tx and send - tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + tx := types.NewTransaction(0, testAddr, big.NewInt(1000), vars.TxGas, big.NewInt(1), nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -179,7 +179,7 @@ func TestNewSimulatedBackend_AdjustTimeFail(t *testing.T) { t.Errorf("adjusted time not equal to a minute. 
prev: %v, new: %v", prevTime, newTime) } // Put a transaction after adjusting time - tx2 := types.NewTransaction(1, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + tx2 := types.NewTransaction(1, testAddr, big.NewInt(1000), vars.TxGas, big.NewInt(1), nil) signedTx2, err := types.SignTx(tx2, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) From c279b9a03cd62a3d4930c07fc165d6c4d9825bdd Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 27 Aug 2020 07:24:01 -0500 Subject: [PATCH 031/105] core: fix types.DeriveSha sig use, test rollback->sethead Signed-off-by: meows --- core/blockchain_remotefreezer_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/core/blockchain_remotefreezer_test.go b/core/blockchain_remotefreezer_test.go index de3231cc82..ca74260b47 100644 --- a/core/blockchain_remotefreezer_test.go +++ b/core/blockchain_remotefreezer_test.go @@ -36,6 +36,7 @@ import ( "github.com/ethereum/go-ethereum/params/types/genesisT" "github.com/ethereum/go-ethereum/params/vars" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" ) // Tests in this file duplicate select tests from blockchain_test.go, @@ -194,7 +195,7 @@ func TestFastVsFullChains_RemoteFreezer(t *testing.T) { for _, v := range headers[pinch:] { rollbackHeaders = append(rollbackHeaders, v.Hash()) } - ancient.Rollback(rollbackHeaders) + ancient.SetHead(headers[pinch].Number.Uint64()) // Reinsert the rolled-back headers and receipts. if n, err := ancient.InsertHeaderChain(headers[pinch:], 1); err != nil { @@ -232,12 +233,12 @@ func TestFastVsFullChains_RemoteFreezer(t *testing.T) { } if fblock, arblock, anblock := fast.GetBlockByHash(hash), archive.GetBlockByHash(hash), ancient.GetBlockByHash(hash); fblock.Hash() != arblock.Hash() || anblock.Hash() != arblock.Hash() { t.Errorf("block #%d [%x]: block mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock, anblock, arblock) - } else if types.DeriveSha(fblock.Transactions()) != types.DeriveSha(arblock.Transactions()) || types.DeriveSha(anblock.Transactions()) != types.DeriveSha(arblock.Transactions()) { + } else if types.DeriveSha(fblock.Transactions(), new(trie.Trie)) != types.DeriveSha(arblock.Transactions(), new(trie.Trie)) || types.DeriveSha(anblock.Transactions(), new(trie.Trie)) != types.DeriveSha(arblock.Transactions(), new(trie.Trie)) { t.Errorf("block #%d [%x]: transactions mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Transactions(), anblock.Transactions(), arblock.Transactions()) } else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) { t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock, arblock.Uncles()) } - if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) { + if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, 
*rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts, new(trie.Trie)) != types.DeriveSha(areceipts, new(trie.Trie)) { t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts) } } From dbfe59ce00c4bdeb21dc66f16fc75579771aaeae Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 27 Aug 2020 07:27:43 -0500 Subject: [PATCH 032/105] core: fix MustCommit Genesis calls Signed-off-by: meows --- core/blockchain_repair_test.go | 4 +++- core/blockchain_sethead_test.go | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index 27903dd06b..96d71d978e 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -32,6 +32,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/params/types/genesisT" ) // Tests a recovery for a short canonical chain where a recent block was already @@ -1573,9 +1574,10 @@ func testRepair(t *testing.T, tt *rewindTest) { // Initialize a fresh chain var ( - genesis = new(Genesis).MustCommit(db) + genesis = MustCommitGenesis(db, new(genesisT.Genesis)) engine = ethash.NewFullFaker() ) + chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to create chain: %v", err) diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index dc1368ff4b..b9b4cdc833 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -33,6 +33,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/params/types/genesisT" ) // rewindTest is a test case for chain rollback upon user request. @@ -1764,7 +1765,7 @@ func testSetHead(t *testing.T, tt *rewindTest) { // Initialize a fresh chain var ( - genesis = new(Genesis).MustCommit(db) + genesis = MustCommitGenesis(db, new(genesisT.Genesis)) engine = ethash.NewFullFaker() ) chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) From 297b3cbdc4b6eef3f71cd33826189093c10184f0 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 27 Aug 2020 07:30:51 -0500 Subject: [PATCH 033/105] rawdb,fetcher: (lint) goimports -w Signed-off-by: meows --- core/rawdb/schema.go | 2 -- eth/fetcher/block_fetcher_test.go | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index a6023576dc..03856de1f8 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -225,7 +225,6 @@ func preimageKey(hash common.Hash) []byte { return append(preimagePrefix, hash.Bytes()...) } - // codeKey = codePrefix + hash func codeKey(hash common.Hash) []byte { return append(codePrefix, hash.Bytes()...) @@ -240,7 +239,6 @@ func IsCodeKey(key []byte) (bool, []byte) { return false, nil } - // ConfigKey = ConfigPrefix + hash func ConfigKey(hash common.Hash) []byte { return append(ConfigPrefix, hash.Bytes()...) 
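Patches 031 and 032 above track two upstream API changes: types.DeriveSha now takes an explicit trie hasher argument, and committing a genesis spec moved from a method on Genesis to the package function MustCommitGenesis with a genesisT.Genesis value. A minimal usage sketch follows; only the two calls are taken from the diffs above, and the surrounding program (imports, a throwaway in-memory database, printing the result) is an illustrative assumption rather than code from any patch.

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/params/types/genesisT"
        "github.com/ethereum/go-ethereum/trie"
    )

    func main() {
        db := rawdb.NewMemoryDatabase()
        // Previously: new(Genesis).MustCommit(db); the spec type now lives in genesisT.
        block := core.MustCommitGenesis(db, new(genesisT.Genesis))
        // Previously: types.DeriveSha(block.Transactions()); a hasher is now required.
        fmt.Println(types.DeriveSha(block.Transactions(), new(trie.Trie)))
    }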
diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index dc14e207bc..b19db24083 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -31,8 +31,8 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/params/vars" + "github.com/ethereum/go-ethereum/trie" ) var ( @@ -40,7 +40,7 @@ var ( testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") testAddress = crypto.PubkeyToAddress(testKey.PublicKey) genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000)) - unknownBlock = types.NewBlock(&types.Header{GasLimit: vars.GenesisGasLimit}, nil, nil, nil, new(trie.Trie)) + unknownBlock = types.NewBlock(&types.Header{GasLimit: vars.GenesisGasLimit}, nil, nil, nil, new(trie.Trie)) ) // makeChain creates a chain of n blocks starting at and including parent. From 5d0928c05995dffef98c1e6dfc4635d9840354fa Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 27 Aug 2020 08:17:21 -0500 Subject: [PATCH 034/105] lib,core,rawdb,test.out: fix TestFastVsFullChains_RemoteFreezer Signed-off-by: meows --- cmd/ancient-store-mem/lib/mem.go | 16 +++--- core/blockchain_remotefreezer_test.go | 14 ++--- core/rawdb/database.go | 2 +- core/rawdb/freezer.go | 1 + core/rawdb/freezer_remote_client.go | 80 ++++++++++++++++++++++----- 5 files changed, 83 insertions(+), 30 deletions(-) diff --git a/cmd/ancient-store-mem/lib/mem.go b/cmd/ancient-store-mem/lib/mem.go index ec96881376..0d923adb94 100644 --- a/cmd/ancient-store-mem/lib/mem.go +++ b/cmd/ancient-store-mem/lib/mem.go @@ -60,7 +60,7 @@ func (f *MemFreezerRemoteServerAPI) Reset() { } func (f *MemFreezerRemoteServerAPI) HasAncient(kind string, number uint64) (bool, error) { - fmt.Println("mock server called", "method=HasAncient") + // fmt.Println("mock server called", "method=HasAncient") f.mu.Lock() defer f.mu.Unlock() _, ok := f.store[f.storeKey(kind, number)] @@ -68,7 +68,7 @@ func (f *MemFreezerRemoteServerAPI) HasAncient(kind string, number uint64) (bool } func (f *MemFreezerRemoteServerAPI) Ancient(kind string, number uint64) ([]byte, error) { - fmt.Println("mock server called", "method=Ancient") + // fmt.Println("mock server called", "method=Ancient") f.mu.Lock() defer f.mu.Unlock() v, ok := f.store[f.storeKey(kind, number)] @@ -79,12 +79,12 @@ func (f *MemFreezerRemoteServerAPI) Ancient(kind string, number uint64) ([]byte, } func (f *MemFreezerRemoteServerAPI) Ancients() (uint64, error) { - fmt.Println("mock server called", "method=Ancients") + // fmt.Println("mock server called", "method=Ancients") return f.count, nil } func (f *MemFreezerRemoteServerAPI) AncientSize(kind string) (uint64, error) { - fmt.Println("mock server called", "method=AncientSize") + // fmt.Println("mock server called", "method=AncientSize") sum := uint64(0) for k, v := range f.store { if strings.HasPrefix(k, kind) { @@ -95,7 +95,7 @@ func (f *MemFreezerRemoteServerAPI) AncientSize(kind string) (uint64, error) { } func (f *MemFreezerRemoteServerAPI) AppendAncient(number uint64, hash, header, body, receipt, td []byte) error { - fmt.Println("mock server called", "method=AppendAncient", "number=", number, "header", fmt.Sprintf("%x", header)) + // fmt.Println("mock server called", "method=AppendAncient", "number=", number, "header", fmt.Sprintf("%x", header)) fieldNames := []string{ freezerRemoteHashTable, 
freezerRemoteHeaderTable, @@ -118,7 +118,7 @@ func (f *MemFreezerRemoteServerAPI) AppendAncient(number uint64, hash, header, b } func (f *MemFreezerRemoteServerAPI) TruncateAncients(n uint64) error { - fmt.Println("mock server called", "method=TruncateAncients") + // fmt.Println("mock server called", "method=TruncateAncients") f.count = n f.mu.Lock() defer f.mu.Unlock() @@ -136,11 +136,11 @@ func (f *MemFreezerRemoteServerAPI) TruncateAncients(n uint64) error { } func (f *MemFreezerRemoteServerAPI) Sync() error { - fmt.Println("mock server called", "method=Sync") + // fmt.Println("mock server called", "method=Sync") return nil } func (f *MemFreezerRemoteServerAPI) Close() error { - fmt.Println("mock server called", "method=Close") + // fmt.Println("mock server called", "method=Close") return nil } diff --git a/core/blockchain_remotefreezer_test.go b/core/blockchain_remotefreezer_test.go index ca74260b47..f1b70a4c60 100644 --- a/core/blockchain_remotefreezer_test.go +++ b/core/blockchain_remotefreezer_test.go @@ -190,18 +190,16 @@ func TestFastVsFullChains_RemoteFreezer(t *testing.T) { } // Test a rollback, causing the ancient store to use the TruncateAncient method. - pinch := len(blocks) / 4 - rollbackHeaders := []common.Hash{} - for _, v := range headers[pinch:] { - rollbackHeaders = append(rollbackHeaders, v.Hash()) + if err := ancient.SetHead(0); err != nil { + t.Fatalf("set head err: %v", err) } - ancient.SetHead(headers[pinch].Number.Uint64()) // Reinsert the rolled-back headers and receipts. - if n, err := ancient.InsertHeaderChain(headers[pinch:], 1); err != nil { - t.Fatalf("failed to insert header %d: %v", n, err) + if n, err := ancient.InsertHeaderChain(headers, 1); err != nil { + t.Log(ancient.CurrentHeader().Number.Uint64()) + t.Fatalf("failed to insert header %d (#%d): %v", n, headers[n].Number.Uint64(), err) } - if n, err := ancient.InsertReceiptChain(blocks[pinch:], receipts, ancientLimit); err != nil { + if n, err := ancient.InsertReceiptChain(blocks, receipts, ancientLimit); err != nil { t.Fatalf("failed to insert receipt %d: %v", n, err) } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index cd3d9b926c..c08d541886 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -203,7 +203,7 @@ Please set --ancient.rpc to the correct path, and/or review the remote freezer's } } // Freezer is consistent with the key-value database, permit combining the two - go freezeRemote(db, frdb, frdb.quit) + go freezeRemote(db, frdb, frdb.threshold, frdb.quit, frdb.trigger) return &freezerdb{ KeyValueStore: db, diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index c8f2691807..e23171ae01 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -299,6 +299,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) { backoff = true continue } + //here number := ReadHeaderNumber(nfdb, hash) threshold := atomic.LoadUint64(&f.threshold) diff --git a/core/rawdb/freezer_remote_client.go b/core/rawdb/freezer_remote_client.go index e990fa6517..c54d740f8b 100644 --- a/core/rawdb/freezer_remote_client.go +++ b/core/rawdb/freezer_remote_client.go @@ -1,6 +1,7 @@ package rawdb import ( + "sync" "time" "github.com/ethereum/go-ethereum/common" @@ -16,6 +17,9 @@ import ( type FreezerRemoteClient struct { client *rpc.Client quit chan struct{} + threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests) + trigger chan chan struct{} // Manual blocking freeze trigger, test determinism + closeOnce sync.Once } const ( @@ 
-37,6 +41,9 @@ func newFreezerRemoteClient(endpoint string) (*FreezerRemoteClient, error) { } return &FreezerRemoteClient{ client: client, + threshold: vars.FullImmutabilityThreshold, + quit: make(chan struct{}), + trigger: make(chan chan struct{}), }, nil } @@ -110,10 +117,13 @@ func (api *FreezerRemoteClient) Sync() error { // to exist unmodified and untouched by the remote freezer client, which demands // a slightly different signature, and uses the freezer.Ancients() method instead // of direct access to the atomic freezer.frozen field. -func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan struct{}) { +func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, threshold uint64, quitChan chan struct{}, triggerChanChan chan chan struct{}) { nfdb := &nofreezedb{KeyValueStore: db} - backoff := false + var ( + backoff bool + triggered chan struct{} // Used in tests + ) for { select { case <-quitChan: @@ -122,14 +132,20 @@ func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan st default: } if backoff { + // If we were doing a manual trigger, notify it + if triggered != nil { + triggered <- struct{}{} + triggered = nil + } select { case <-time.NewTimer(freezerRecheckInterval).C: backoff = false + case triggered = <-triggerChanChan: + backoff = false case <-quitChan: return } } - // Retrieve the freezing threshold. hash := ReadHeadBlockHash(nfdb) if hash == (common.Hash{}) { @@ -137,25 +153,25 @@ func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan st backoff = true continue } - numFrozen, err := f.Ancients() if err != nil { log.Crit("ancient db freeze", "error", err) } - number := ReadHeaderNumber(nfdb, hash) + // threshold := atomic.LoadUint64(&f.threshold) + switch { case number == nil: log.Error("Current full block number unavailable", "hash", hash) backoff = true continue - case *number < vars.FullImmutabilityThreshold: - log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", vars.FullImmutabilityThreshold) + case *number < threshold: + log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold) backoff = true continue - case *number-vars.FullImmutabilityThreshold <= numFrozen: + case *number-threshold <= numFrozen: log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", numFrozen) backoff = true continue @@ -167,7 +183,7 @@ func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan st continue } // Seems we have data ready to be frozen, process in usable batches - limit := *number - vars.FullImmutabilityThreshold + limit := *number - threshold if limit-numFrozen > freezerBatchLimit { limit = numFrozen + freezerBatchLimit } @@ -176,7 +192,7 @@ func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan st first = numFrozen ancients = make([]common.Hash, 0, limit-numFrozen) ) - for numFrozen < limit { + for numFrozen <= limit { // Retrieves all the components of the canonical block hash := ReadCanonicalHash(nfdb, numFrozen) if hash == (common.Hash{}) { @@ -208,7 +224,6 @@ func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan st if err := f.AppendAncient(numFrozen, hash[:], header, body, receipts, td); err != nil { break } - numFrozen++ // Manually increment numFrozen (save a call) ancients = append(ancients, hash) } // Batch of blocks have been frozen, flush them before wiping from leveldb @@ -228,11 +243,15 @@ func freezeRemote(db 
ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan st log.Crit("Failed to delete frozen canonical blocks", "err", err) } batch.Reset() - // Wipe out side chain also. + + // Wipe out side chains also and track dangling side chians + var dangling []common.Hash for number := first; number < numFrozen; number++ { // Always keep the genesis block in active database if number != 0 { - for _, hash := range ReadAllHashes(db, number) { + dangling = ReadAllHashes(db, number) + for _, hash := range dangling { + log.Trace("Deleting side chain", "number", number, "hash", hash) DeleteBlock(batch, hash, number) } } @@ -240,6 +259,41 @@ func freezeRemote(db ethdb.KeyValueStore, f ethdb.AncientStore, quitChan chan st if err := batch.Write(); err != nil { log.Crit("Failed to delete frozen side blocks", "err", err) } + batch.Reset() + + // Step into the future and delete and dangling side chains + if numFrozen > 0 { + tip := numFrozen + for len(dangling) > 0 { + drop := make(map[common.Hash]struct{}) + for _, hash := range dangling { + log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash) + drop[hash] = struct{}{} + } + children := ReadAllHashes(db, tip) + for i := 0; i < len(children); i++ { + // Dig up the child and ensure it's dangling + child := ReadHeader(nfdb, children[i], tip) + if child == nil { + log.Error("Missing dangling header", "number", tip, "hash", children[i]) + continue + } + if _, ok := drop[child.ParentHash]; !ok { + children = append(children[:i], children[i+1:]...) + i-- + continue + } + // Delete all block data associated with the child + log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash) + DeleteBlock(batch, children[i], tip) + } + dangling = children + tip++ + } + if err := batch.Write(); err != nil { + log.Crit("Failed to delete dangling side blocks", "err", err) + } + } // Log something friendly for the user context := []interface{}{ "blocks", numFrozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", numFrozen - 1, From bf84a9adc981c01bffc21e8b26dfe8dea153b52e Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 27 Aug 2020 08:22:44 -0500 Subject: [PATCH 035/105] core: remove invalid test blockchain_repair_test.go is now a thing, and some things appear to have changed w/r/t recovery expectations. Signed-off-by: meows --- core/blockchain_remotefreezer_test.go | 71 --------------------------- 1 file changed, 71 deletions(-) diff --git a/core/blockchain_remotefreezer_test.go b/core/blockchain_remotefreezer_test.go index f1b70a4c60..a255e81a0d 100644 --- a/core/blockchain_remotefreezer_test.go +++ b/core/blockchain_remotefreezer_test.go @@ -251,77 +251,6 @@ func TestFastVsFullChains_RemoteFreezer(t *testing.T) { } } -func TestBlockchainRecovery_RemoteFreezer(t *testing.T) { - // Configure and generate a sample block chain - var ( - gendb = rawdb.NewMemoryDatabase() - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000) - gspec = &genesisT.Genesis{Config: params.TestChainConfig, Alloc: genesisT.GenesisAlloc{address: {Balance: funds}}} - genesis = MustCommitGenesis(gendb, gspec) - ) - height := uint64(1024) - blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), nil) - - // Import the chain as a ancient-first node and ensure all pointers are updated - // Freezer style fast import the chain. 
- freezerRPCEndpoint, server, ancientDb := testRPCRemoteFreezer(t) - if n, err := ancientDb.Ancients(); err != nil { - t.Fatalf("ancients: %v", err) - } else if n != 0 { - t.Logf("truncating pre-existing ancients from: %d (truncating to 0)", n) - err = ancientDb.TruncateAncients(0) - if err != nil { - t.Fatalf("truncate ancients: %v", err) - } - } - if server != nil { - defer os.RemoveAll(filepath.Dir(freezerRPCEndpoint)) - defer server.Stop() - } - defer ancientDb.Close() // Cause the Close method to be called. - defer func() { - // A deferred truncation to 0 will allow a single freezer instance to - // handle multiple tests in serial. - if err := ancientDb.TruncateAncients(0); err != nil { - t.Fatalf("deferred truncate ancients error: %v", err) - } - }() - - MustCommitGenesis(ancientDb, gspec) - ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) - - headers := make([]*types.Header, len(blocks)) - for i, block := range blocks { - headers[i] = block.Header() - } - if n, err := ancient.InsertHeaderChain(headers, 1); err != nil { - t.Fatalf("failed to insert header %d: %v", n, err) - } - if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil { - t.Fatalf("failed to insert receipt %d: %v", n, err) - } - ancient.Stop() - - // Destroy head fast block manually - midBlock := blocks[len(blocks)/2] - rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash()) - - // Reopen broken blockchain again - ancient, _ = NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) - defer ancient.Stop() - if num := ancient.CurrentBlock().NumberU64(); num != 0 { - t.Errorf("head block mismatch: have #%v, want #%v", num, 0) - } - if num := ancient.CurrentFastBlock().NumberU64(); num != midBlock.NumberU64() { - t.Errorf("head fast-block mismatch: have #%v, want #%v", num, midBlock.NumberU64()) - } - if num := ancient.CurrentHeader().Number.Uint64(); num != midBlock.NumberU64() { - t.Errorf("head header mismatch: have #%v, want #%v", num, midBlock.NumberU64()) - } -} - func TestIncompleteAncientReceiptChainInsertion_RemoteFreezer(t *testing.T) { // Configure and generate a sample block chain var ( From ede0a951b10caea4be51f93ed72577a6978c43f8 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 27 Aug 2020 08:41:30 -0500 Subject: [PATCH 036/105] rawdb: (lint) goimports -w Signed-off-by: meows --- core/rawdb/freezer_remote_client.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/core/rawdb/freezer_remote_client.go b/core/rawdb/freezer_remote_client.go index c54d740f8b..d0ad602b99 100644 --- a/core/rawdb/freezer_remote_client.go +++ b/core/rawdb/freezer_remote_client.go @@ -15,10 +15,10 @@ import ( // The struct's methods delegate the business logic to an external server // that is responsible for managing an actual ancient store. 
type FreezerRemoteClient struct { - client *rpc.Client - quit chan struct{} - threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests) - trigger chan chan struct{} // Manual blocking freeze trigger, test determinism + client *rpc.Client + quit chan struct{} + threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests) + trigger chan chan struct{} // Manual blocking freeze trigger, test determinism closeOnce sync.Once } @@ -40,10 +40,10 @@ func newFreezerRemoteClient(endpoint string) (*FreezerRemoteClient, error) { return nil, err } return &FreezerRemoteClient{ - client: client, + client: client, threshold: vars.FullImmutabilityThreshold, - quit: make(chan struct{}), - trigger: make(chan chan struct{}), + quit: make(chan struct{}), + trigger: make(chan chan struct{}), }, nil } From df0126cceaea9a04191fc04160e7e225494d5803 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 27 Aug 2020 09:46:43 -0500 Subject: [PATCH 037/105] rawdb: add missing import Signed-off-by: meows --- core/rawdb/freezer.go | 1 + 1 file changed, 1 insertion(+) diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index e23171ae01..1da5694264 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/params/vars" "github.com/prometheus/tsdb/fileutil" ) From 7269a1748a81ed106386dca04fca56d64a1a1eb0 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 27 Aug 2020 13:26:53 -0500 Subject: [PATCH 038/105] core: fix initialization from freezer Blockchain initialization from a kv-destroyed but freezer-ok databases failed. The two problems were: - bc.empty was returning a false-negative when the database had no KV full/fast/head block data - bc.loadLastState -> Reset -> SetHead were endlessly looping, or panicing (because SetHead) does not tolerate CurrentBlock() returning nil. Signed-off-by: meows --- core/blockchain.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index dc80902112..91f291aee9 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -274,6 +274,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig ctyp if frozen > 0 { txIndexBlock = frozen } + bc.writeHeadBlock(bc.genesisBlock) } if err := bc.loadLastState(); err != nil { return nil, err @@ -374,8 +375,11 @@ func (bc *BlockChain) GetVMConfig() *vm.Config { // into node seamlessly. 
func (bc *BlockChain) empty() bool { genesis := bc.genesisBlock.Hash() - for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} { - if hash != genesis { + for _, hash := range []common.Hash{ + rawdb.ReadHeadBlockHash(bc.db), + rawdb.ReadHeadHeaderHash(bc.db), + rawdb.ReadHeadFastBlockHash(bc.db)} { + if hash != (common.Hash{}) && hash != genesis { return false } } From 448fb71b51c3fdbdbf6315890713f0276c68f90e Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 28 Aug 2020 06:41:45 -0500 Subject: [PATCH 039/105] core: add nil condition and comment for writeHeadBlock if bc empty Signed-off-by: meows --- core/blockchain.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/core/blockchain.go b/core/blockchain.go index 91f291aee9..4e03dd4580 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -274,7 +274,10 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig ctyp if frozen > 0 { txIndexBlock = frozen } - bc.writeHeadBlock(bc.genesisBlock) + // loadLastState and other steps below assume that CurrentBlock is not nil. + if bc.CurrentBlock() == nil { + bc.writeHeadBlock(bc.genesisBlock) + } } if err := bc.loadLastState(); err != nil { return nil, err From e8340390c838fce75929b2f2e6d5f7af7d7abd5c Mon Sep 17 00:00:00 2001 From: meows Date: Tue, 8 Sep 2020 15:16:57 -0500 Subject: [PATCH 040/105] params: bump version to v1.11.13-stable Signed-off-by: meows --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index d2153b798e..295fae6e93 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 11 // Minor version component of the current release - VersionPatch = 13 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 11 // Minor version component of the current release + VersionPatch = 13 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string VersionName = "CoreGeth" ) From 172c244db9f476f0a62643c15b7a85a3ded3a9e0 Mon Sep 17 00:00:00 2001 From: meows Date: Tue, 8 Sep 2020 15:18:26 -0500 Subject: [PATCH 041/105] params: bump version to v1.11.14-unstable Signed-off-by: meows --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 295fae6e93..a5ad6b8ebe 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 11 // Minor version component of the current release - VersionPatch = 13 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 11 // Minor version component of the current release + VersionPatch = 14 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string VersionName = "CoreGeth" ) From 4949222f638f34816b12a6cb4c555d1f006fc289 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 08:21:12 -0500 Subject: 
[PATCH 042/105] core: refactor reorganization function and data Extracts the reorganization data-aggregation logic from its execution logic. This will allow observation and arbitration using the proposed reorganization data without necessarily acting on it. Signed-off-by: meows --- core/blockchain.go | 187 ++++++++++++++++++++++++++++++++------------- 1 file changed, 135 insertions(+), 52 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 4e03dd4580..fe44dc2703 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1419,7 +1419,8 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error { current := bc.CurrentBlock() if block.ParentHash() != current.Hash() { - if err := bc.reorg(current, block); err != nil { + d := bc.getReorgData(current, block) + if err := bc.reorg(d); err != nil { return err } } @@ -1543,7 +1544,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. if reorg { // Reorganise the chain if the parent is not the head block if block.ParentHash() != currentBlock.Hash() { - if err := bc.reorg(currentBlock, block); err != nil { + d := bc.getReorgData(currentBlock, block) + if err := bc.reorg(d); err != nil { return NonStatTy, err } } @@ -2045,17 +2047,44 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i return 0, nil } -// reorg takes two blocks, an old chain and a new chain and will reconstruct the -// blocks and inserts them to be part of the new canonical chain and accumulates -// potential missing transactions and post an event about them. -func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { +// errReorgImpossible denotes impossible reorgs. +// And yet, there is an error for if, and when they occur. +// Ah, sweet mystery of life. +var errReorgImpossible = errors.New("impossible reorg") + +// errReorgNewChain denotes an attempted reorg to an invalid incoming chain. +var errReorgNewChain = errors.New("invalid new chain") + +// errReorgNewChain denotes an attempted reorg to an invalid existing chain. +var errReorgOldChain = errors.New("invalid old chain") + +// reorgData is consumed by the reorg method. +type reorgData struct { + oldBlock *types.Block + newBlock *types.Block + + newChain types.Blocks + oldChain types.Blocks + commonBlock *types.Block + + deletedTxs types.Transactions + + deletedLogs [][]*types.Log + rebirthLogs [][]*types.Log + + err error +} + +// getReorgData gets the data required by the chain reorg method. +// This data is aggregated separately to facilitate the modularization of reorg acceptance +// arbitration logic. +func (bc *BlockChain) getReorgData(oldBlock, newBlock *types.Block) (*reorgData) { var ( newChain types.Blocks oldChain types.Blocks commonBlock *types.Block deletedTxs types.Transactions - addedTxs types.Transactions deletedLogs [][]*types.Log rebirthLogs [][]*types.Log @@ -2089,20 +2118,6 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { } } } - // mergeLogs returns a merged log slice with specified sort order. - mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log { - var ret []*types.Log - if reverse { - for i := len(logs) - 1; i >= 0; i-- { - ret = append(ret, logs[i]...) - } - } else { - for i := 0; i < len(logs); i++ { - ret = append(ret, logs[i]...) 
- } - } - return ret - } ) // Reduce the longer chain to the same number as the shorter one if oldBlock.NumberU64() > newBlock.NumberU64() { @@ -2119,10 +2134,10 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { } } if oldBlock == nil { - return fmt.Errorf("invalid old chain") + return &reorgData{err: errReorgOldChain} } if newBlock == nil { - return fmt.Errorf("invalid new chain") + return &reorgData{err: errReorgNewChain} } // Both sides of the reorg are at the same number, reduce both until the common // ancestor is found @@ -2142,46 +2157,114 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { // Step back with both chains oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) if oldBlock == nil { - return fmt.Errorf("invalid old chain") + return &reorgData{err: errReorgOldChain} } newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) if newBlock == nil { - return fmt.Errorf("invalid new chain") + return &reorgData{err: errReorgNewChain} + } + } + + if len(oldChain) == 0 || len(newChain) == 0 { + return &reorgData{err: errReorgImpossible} + } + return &reorgData{ + oldBlock: oldBlock, + newBlock: newBlock, + newChain: newChain, + oldChain: oldChain, + commonBlock: commonBlock, + deletedTxs: deletedTxs, + deletedLogs: deletedLogs, + rebirthLogs: rebirthLogs, + } +} + +// reorg takes two blocks, an old chain and a new chain and will reconstruct the +// blocks and inserts them to be part of the new canonical chain and accumulates +// potential missing transactions and post an event about them. +func (bc *BlockChain) reorg(data *reorgData) error { + if data.err != nil { + if data.err == errReorgImpossible { + log.Error("Impossible reorg, please file an issue", "oldnum", data.oldBlock.Number(), "oldhash", data.oldBlock.Hash(), "newnum", data.newBlock.Number(), "newhash", data.newBlock.Hash()) } + return data.err } + var ( + addedTxs types.Transactions + // mergeLogs returns a merged log slice with specified sort order. + mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log { + var ret []*types.Log + if reverse { + for i := len(logs) - 1; i >= 0; i-- { + ret = append(ret, logs[i]...) + } + } else { + for i := 0; i < len(logs); i++ { + ret = append(ret, logs[i]...) + } + } + return ret + } + // collectLogs collects the logs that were generated or removed during + // the processing of the block that corresponds with the given hash. 
+ // These logs are later announced as deleted or reborn + collectLogs = func(hash common.Hash, removed bool) { + number := bc.hc.GetBlockNumber(hash) + if number == nil { + return + } + receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) + + var logs []*types.Log + for _, receipt := range receipts { + for _, log := range receipt.Logs { + l := *log + if removed { + l.Removed = true + } else { + } + logs = append(logs, &l) + } + } + if len(logs) > 0 { + if removed { + data.deletedLogs = append(data.deletedLogs, logs) + } else { + data.rebirthLogs = append(data.rebirthLogs, logs) + } + } + } + ) + // Ensure the user sees large reorgs - if len(oldChain) > 0 && len(newChain) > 0 { - logFn := log.Info - msg := "Chain reorg detected" - if len(oldChain) > 63 { - msg = "Large chain reorg detected" - logFn = log.Warn - } - logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(), - "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) - blockReorgAddMeter.Mark(int64(len(newChain))) - blockReorgDropMeter.Mark(int64(len(oldChain))) - blockReorgMeter.Mark(1) - } else { - log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) - return fmt.Errorf("impossible reorg") + logFn := log.Info + msg := "Chain reorg detected" + if len(data.oldChain) > 63 { + msg = "Large chain reorg detected" + logFn = log.Warn } + logFn(msg, "number", data.commonBlock.Number(), "hash", data.commonBlock.Hash(), + "drop", len(data.oldChain), "dropfrom", data.oldChain[0].Hash(), "add", len(data.newChain), "addfrom", data.newChain[0].Hash()) + blockReorgAddMeter.Mark(int64(len(data.newChain))) + blockReorgDropMeter.Mark(int64(len(data.oldChain))) + // Insert the new chain(except the head block(reverse order)), // taking care of the proper incremental order. - for i := len(newChain) - 1; i >= 1; i-- { + for i := len(data.newChain) - 1; i >= 1; i-- { // Insert the block in the canonical way, re-writing history - bc.writeHeadBlock(newChain[i]) + bc.writeHeadBlock(data.newChain[i]) // Collect reborn logs due to chain reorg - collectLogs(newChain[i].Hash(), false) + collectLogs(data.newChain[i].Hash(), false) // Collect the new added transactions. - addedTxs = append(addedTxs, newChain[i].Transactions()...) + addedTxs = append(addedTxs, data.newChain[i].Transactions()...) } // Delete useless indexes right now which includes the non-canonical // transaction indexes, canonical chain indexes which above the head. indexesBatch := bc.db.NewBatch() - for _, tx := range types.TxDifference(deletedTxs, addedTxs) { + for _, tx := range types.TxDifference(data.deletedTxs, addedTxs) { rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash()) } // Delete any canonical number assignments above the new head @@ -2200,15 +2283,15 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { // this goroutine if there are no events to fire, but realistcally that only // ever happens if we're reorging empty blocks, which will only happen on idle // networks where performance is not an issue either way. 
- if len(deletedLogs) > 0 { - bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)}) + if len(data.deletedLogs) > 0 { + bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(data.deletedLogs, true)}) } - if len(rebirthLogs) > 0 { - bc.logsFeed.Send(mergeLogs(rebirthLogs, false)) + if len(data.rebirthLogs) > 0 { + bc.logsFeed.Send(mergeLogs(data.rebirthLogs, false)) } - if len(oldChain) > 0 { - for i := len(oldChain) - 1; i >= 0; i-- { - bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]}) + if len(data.oldChain) > 0 { + for i := len(data.oldChain) - 1; i >= 0; i-- { + bc.chainSideFeed.Send(ChainSideEvent{Block: data.oldChain[i]}) } } return nil From 7c86e18e399e36db5170191880a7aaf74f086c5a Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 08:31:09 -0500 Subject: [PATCH 043/105] core: refactor writeBlockWithState reorg logic This is only really an optimization, paving the way for installing additional reorg arbitration. Signed-off-by: meows --- core/blockchain.go | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index fe44dc2703..365918f055 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1528,7 +1528,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf reorg := externTd.Cmp(localTd) > 0 currentBlock = bc.CurrentBlock() - if !reorg && externTd.Cmp(localTd) == 0 { + if externTd.Cmp(localTd) == 0 { // Split same-difficulty blocks by number, then preferentially select // the block generated by the local miner as the canonical block. if block.NumberU64() < currentBlock.NumberU64() { @@ -1541,16 +1541,15 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5) } } - if reorg { + status = CanonStatTy + discontiguousBlocks := block.ParentHash() != currentBlock.Hash() + if reorg && discontiguousBlocks { // Reorganise the chain if the parent is not the head block - if block.ParentHash() != currentBlock.Hash() { - d := bc.getReorgData(currentBlock, block) - if err := bc.reorg(d); err != nil { - return NonStatTy, err - } + d := bc.getReorgData(currentBlock, block) + if err := bc.reorg(d); err != nil { + return NonStatTy, err } - status = CanonStatTy - } else { + } else if discontiguousBlocks { status = SideStatTy } // Set new head. @@ -2078,7 +2077,7 @@ type reorgData struct { // getReorgData gets the data required by the chain reorg method. // This data is aggregated separately to facilitate the modularization of reorg acceptance // arbitration logic. -func (bc *BlockChain) getReorgData(oldBlock, newBlock *types.Block) (*reorgData) { +func (bc *BlockChain) getReorgData(oldBlock, newBlock *types.Block) *reorgData { var ( newChain types.Blocks oldChain types.Blocks From 468b1528fa74712022bc4d3c44d825250ccb143b Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 09:44:47 -0500 Subject: [PATCH 044/105] core: init ecbp11355 (MESS) implementation Implements spec documented here: https://github.com/etclabscore/artificial-finality/blob/master/MESS.md MESS is an artificial finality mechanism endowing the blockchain's current segment with so-called 'gravity' by requiring a competing chain to satisfy an exponentially increasing relative total difficulty. Note that is change causes several tests with reorgs to fail. 
This will be addressed by implementing the feature as a chain configuration option. Signed-off-by: meows --- core/blockchain.go | 41 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/core/blockchain.go b/core/blockchain.go index 365918f055..65f6e80fbd 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "io" + "math" "math/big" mrand "math/rand" "sort" @@ -1543,12 +1544,19 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. } status = CanonStatTy discontiguousBlocks := block.ParentHash() != currentBlock.Hash() - if reorg && discontiguousBlocks { + reorg = reorg && discontiguousBlocks + + if reorg { // Reorganise the chain if the parent is not the head block d := bc.getReorgData(currentBlock, block) + if d.err == nil { + d.err = bc.ecbp11355(d.commonBlock.Header(), currentBlock.Header(), block.Header()) + } if err := bc.reorg(d); err != nil { return NonStatTy, err } + // Status is (remains) canon; reorg succeeded. + } else if discontiguousBlocks { status = SideStatTy } @@ -2179,9 +2187,40 @@ func (bc *BlockChain) getReorgData(oldBlock, newBlock *types.Block) *reorgData { } } +// errReorgFinality represents an error caused by artificial finality mechanisms. +var errReorgFinality = errors.New("finality-enforced invalid new chain") + +// ecpb11355 implements the "MESS" artificial finality mechanism +// "Modified Exponential Subject Scoring" used to prefer known chain segments +// over later-to-come counterparts, especially proposed segments stretching far into the past. +func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) error { + commonAncestorTD := bc.GetTd(commonAncestor.Hash(), commonAncestor.Number.Uint64()) + + proposedParentTD := bc.GetTd(proposed.ParentHash, proposed.Number.Uint64()-1) + proposedTD := new(big.Int).Add(proposed.Difficulty, proposedParentTD) + + localTD := bc.GetTd(current.Hash(), current.Number.Uint64()) + + tdRatio, _ := new(big.Float).Quo( + new(big.Float).SetInt(new(big.Int).Sub(proposedTD, commonAncestorTD)), + new(big.Float).SetInt(new(big.Int).Sub(localTD, commonAncestorTD)), + ).Float64() + + antiGravity := math.Pow(1.0001, float64(proposed.Time-commonAncestor.Time)) + + if tdRatio < antiGravity { + // Using "b/a" here as "'B' chain vs. 'A' chain", where A is original (current), and B is proposed (new). + return fmt.Errorf("%w: ECPB11355-MESS: td.b/a(%0.3f) < antigravity(%0.3f)", errReorgFinality, tdRatio, antiGravity) + } + return nil +} + // reorg takes two blocks, an old chain and a new chain and will reconstruct the // blocks and inserts them to be part of the new canonical chain and accumulates // potential missing transactions and post an event about them. +// If reorgData passed contains an a non-nil error, the method is expect to return it immediately. +// This kind-of-strange pattern is in place to allow the function to issue "special case" warning logs +// consistent with its behavior prior to refactoring. func (bc *BlockChain) reorg(data *reorgData) error { if data.err != nil { if data.err == errReorgImpossible { From 9ea9d7e3dbfd7382337ad5c7de1c858c12b96963 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 10:16:01 -0500 Subject: [PATCH 045/105] core,coregeth,ctypes,genesisT,goethereum,multigeth,parity: init ECBP11355 configuration interface implementation Implement ECBP11355 interface methods for supporting chain configuration types. 
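To make the exponential requirement of the MESS check above concrete: ecbp11355 accepts a proposed segment only if its total-difficulty gain since the common ancestor, relative to the local segment's gain, is at least 1.0001 raised to the number of seconds between the common ancestor and the proposed head. The required margin therefore roughly doubles every ln 2 / ln 1.0001 ≈ 6,930 seconds (about 1.9 hours). Below is a minimal standalone sketch of that rule; the function name and the numbers are illustrative assumptions, not code from the patches.

    package main

    import (
        "fmt"
        "math"
        "math/big"
    )

    // messAccepts restates the acceptance test from ecbp11355: the proposed
    // segment's TD gain over the common ancestor must outweigh the local
    // segment's gain by an "antigravity" factor that grows with elapsed time.
    func messAccepts(localTD, proposedTD, ancestorTD *big.Int, ancestorTime, proposedTime uint64) bool {
        tdRatio, _ := new(big.Float).Quo(
            new(big.Float).SetInt(new(big.Int).Sub(proposedTD, ancestorTD)),
            new(big.Float).SetInt(new(big.Int).Sub(localTD, ancestorTD)),
        ).Float64()
        antiGravity := math.Pow(1.0001, float64(proposedTime-ancestorTime))
        return tdRatio >= antiGravity
    }

    func main() {
        ancestorTD := big.NewInt(1000000)
        localTD := big.NewInt(1100000)    // local chain gained 100,000 TD since the ancestor
        proposedTD := big.NewInt(1150000) // proposed chain gained 150,000 TD (ratio 1.5)
        fmt.Println(messAccepts(localTD, proposedTD, ancestorTD, 0, 3600)) // true:  1.5 >= 1.0001^3600 ≈ 1.43
        fmt.Println(messAccepts(localTD, proposedTD, ancestorTD, 0, 7200)) // false: 1.5 <  1.0001^7200 ≈ 2.05
    }

Under this rule a competing chain that forks off far in the past must bring overwhelmingly more work than the segment it wants to displace, which is the "gravity" described in the commit message above.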
Signed-off-by: meows --- core/blockchain.go | 13 ++++++++++++- params/types/coregeth/chain_config.go | 2 ++ .../types/coregeth/chain_config_configurator.go | 9 +++++++++ params/types/ctypes/configurator_iface.go | 2 ++ params/types/genesisT/genesis.go | 16 ++++++++++++---- .../types/goethereum/goethereum_configurator.go | 11 +++++++++++ .../multigethv0_chain_config_configurator.go | 11 +++++++++++ params/types/parity/parity_configurator.go | 11 +++++++++++ 8 files changed, 70 insertions(+), 5 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 65f6e80fbd..e357f91efc 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1549,9 +1549,20 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. if reorg { // Reorganise the chain if the parent is not the head block d := bc.getReorgData(currentBlock, block) - if d.err == nil { + if d.err != nil { + // Will ALWAYS return the/an error, since the data.err field is non-nil. + // We leave the error to the reorg method to handle, if it wants to wrap it or log it or whatever. + if err := bc.reorg(d); err != nil { + return NonStatTy, err + } + } + + // Reorg data error was nil. + // Proceed with further reorg arbitration. + if bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, currentBlock.Number()) { d.err = bc.ecbp11355(d.commonBlock.Header(), currentBlock.Header(), block.Header()) } + if err := bc.reorg(d); err != nil { return NonStatTy, err } diff --git a/params/types/coregeth/chain_config.go b/params/types/coregeth/chain_config.go index 9ff554b1a5..33da281e0e 100644 --- a/params/types/coregeth/chain_config.go +++ b/params/types/coregeth/chain_config.go @@ -173,6 +173,8 @@ type CoreGethChainConfig struct { ECIP1017EraRounds *big.Int `json:"ecip1017EraRounds,omitempty"` // ECIP1017 era rounds ECIP1080FBlock *big.Int `json:"ecip1080FBlock,omitempty"` + ECBP11355FBlock *big.Int `json:"ecbp11355FBlock,omitempty"` // ECBP11355:MESS artificial finality + DisposalBlock *big.Int `json:"disposalBlock,omitempty"` // Bomb disposal HF block SocialBlock *big.Int `json:"socialBlock,omitempty"` // Ethereum Social Reward block EthersocialBlock *big.Int `json:"ethersocialBlock,omitempty"` // Ethersocial Reward block diff --git a/params/types/coregeth/chain_config_configurator.go b/params/types/coregeth/chain_config_configurator.go index cd20e5da2f..ee6da04ab1 100644 --- a/params/types/coregeth/chain_config_configurator.go +++ b/params/types/coregeth/chain_config_configurator.go @@ -381,6 +381,15 @@ func (c *CoreGethChainConfig) SetEIP2537Transition(n *uint64) error { return nil } +func (c *CoreGethChainConfig) GetECBP11355Transition() *uint64 { + return bigNewU64(c.ECBP11355FBlock) +} + +func (c *CoreGethChainConfig) SetECBP11355Transition(n *uint64) error { + c.ECBP11355FBlock = setBig(c.ECBP11355FBlock, n) + return nil +} + func (c *CoreGethChainConfig) IsEnabled(fn func() *uint64, n *big.Int) bool { f := fn() if f == nil || n == nil { diff --git a/params/types/ctypes/configurator_iface.go b/params/types/ctypes/configurator_iface.go index e3ff71a2b1..a3941f762b 100644 --- a/params/types/ctypes/configurator_iface.go +++ b/params/types/ctypes/configurator_iface.go @@ -130,6 +130,8 @@ type ProtocolSpecifier interface { SetEIP1706Transition(n *uint64) error GetEIP2537Transition() *uint64 SetEIP2537Transition(n *uint64) error + GetECBP11355Transition() *uint64 + SetECBP11355Transition(n *uint64) error } type Forker interface { diff --git a/params/types/genesisT/genesis.go 
b/params/types/genesisT/genesis.go index ee5b1096ac..ee3b942ce8 100644 --- a/params/types/genesisT/genesis.go +++ b/params/types/genesisT/genesis.go @@ -528,22 +528,30 @@ func (g *Genesis) SetECIP1080Transition(n *uint64) error { return g.Config.SetECIP1080Transition(n) } -func (g Genesis) GetEIP1706Transition() *uint64 { +func (g *Genesis) GetEIP1706Transition() *uint64 { return g.Config.GetEIP1706Transition() } -func (g Genesis) SetEIP1706Transition(n *uint64) error { +func (g *Genesis) SetEIP1706Transition(n *uint64) error { return g.Config.SetEIP1706Transition(n) } -func (g Genesis) GetEIP2537Transition() *uint64 { +func (g *Genesis) GetEIP2537Transition() *uint64 { return g.Config.GetEIP2537Transition() } -func (g Genesis) SetEIP2537Transition(n *uint64) error { +func (g *Genesis) SetEIP2537Transition(n *uint64) error { return g.Config.SetEIP2537Transition(n) } +func (g *Genesis) GetECBP11355Transition() *uint64 { + return g.Config.GetECBP11355Transition() +} + +func (g *Genesis) SetECBP11355Transition(n *uint64) error { + return g.Config.SetECBP11355Transition(n) +} + func (g *Genesis) IsEnabled(fn func() *uint64, n *big.Int) bool { return g.Config.IsEnabled(fn, n) } diff --git a/params/types/goethereum/goethereum_configurator.go b/params/types/goethereum/goethereum_configurator.go index 2ae8c64a98..99fdc76b25 100644 --- a/params/types/goethereum/goethereum_configurator.go +++ b/params/types/goethereum/goethereum_configurator.go @@ -391,6 +391,17 @@ func (c *ChainConfig) SetEIP2537Transition(n *uint64) error { return nil } +func (c *ChainConfig) GetECBP11355Transition() *uint64 { + return nil +} + +func (c *ChainConfig) SetECBP11355Transition(n *uint64) error { + if n == nil { + return nil + } + return ctypes.ErrUnsupportedConfigFatal +} + func (c *ChainConfig) IsEnabled(fn func() *uint64, n *big.Int) bool { f := fn() if f == nil || n == nil { diff --git a/params/types/multigeth/multigethv0_chain_config_configurator.go b/params/types/multigeth/multigethv0_chain_config_configurator.go index 46e02e63e7..ebdf349272 100644 --- a/params/types/multigeth/multigethv0_chain_config_configurator.go +++ b/params/types/multigeth/multigethv0_chain_config_configurator.go @@ -416,6 +416,17 @@ func (c *ChainConfig) SetEIP2537Transition(n *uint64) error { return ctypes.ErrUnsupportedConfigFatal } +func (c *ChainConfig) GetECBP11355Transition() *uint64 { + return nil +} + +func (c *ChainConfig) SetECBP11355Transition(n *uint64) error { + if n == nil { + return nil + } + return ctypes.ErrUnsupportedConfigFatal +} + func (c *ChainConfig) IsEnabled(fn func() *uint64, n *big.Int) bool { f := fn() if f == nil || n == nil { diff --git a/params/types/parity/parity_configurator.go b/params/types/parity/parity_configurator.go index 2b135a5b1b..7f82f26150 100644 --- a/params/types/parity/parity_configurator.go +++ b/params/types/parity/parity_configurator.go @@ -624,6 +624,17 @@ func (spec *ParityChainSpec) SetEIP2537Transition(n *uint64) error { return nil } +func (spec *ParityChainSpec) GetECBP11355Transition() *uint64 { + return nil +} + +func (spec *ParityChainSpec) SetECBP11355Transition(n *uint64) error { + if n == nil { + return nil + } + return ctypes.ErrUnsupportedConfigFatal +} + func (spec *ParityChainSpec) IsEnabled(fn func() *uint64, n *big.Int) bool { f := fn() if f == nil || n == nil { From 2718253c750ebfd93273054cde509a0b3348c75c Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 10:20:09 -0500 Subject: [PATCH 046/105] core: implement new blockReorgMeter in reorg method This meter was 
added to ethereum/go-ethereum after my original refactor was done, so my update missed it, and it was caught by the deadcode linter. Add it, fix it. Signed-off-by: meows --- core/blockchain.go | 1 + 1 file changed, 1 insertion(+) diff --git a/core/blockchain.go b/core/blockchain.go index e357f91efc..dd11f60531 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -2297,6 +2297,7 @@ func (bc *BlockChain) reorg(data *reorgData) error { "drop", len(data.oldChain), "dropfrom", data.oldChain[0].Hash(), "add", len(data.newChain), "addfrom", data.newChain[0].Hash()) blockReorgAddMeter.Mark(int64(len(data.newChain))) blockReorgDropMeter.Mark(int64(len(data.oldChain))) + blockReorgMeter.Mark(1) // Insert the new chain(except the head block(reverse order)), // taking care of the proper incremental order. From 9d82e6094708f5f58bf90ecb84059110686cf2c1 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 11:11:58 -0500 Subject: [PATCH 047/105] core,eth: implement AF safety mechanisms at sync level - local head staleness - min peers These are mechanisms designed to provide sane safeguard heuristics for nodes to enable/disable artificial finality features within reason. Signed-off-by: meows --- core/blockchain.go | 24 +++++++++++++++++- eth/sync.go | 62 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 1 deletion(-) diff --git a/core/blockchain.go b/core/blockchain.go index dd11f60531..2bb95f6dbd 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -202,6 +202,8 @@ type BlockChain struct { running int32 // 0 if chain is running, 1 when stopped procInterrupt int32 // interrupt signaler for block processing + artificialFinalityEnabled int32 // toggles artificial finality features + engine consensus.Engine validator Validator // Block and state validator interface prefetcher Prefetcher // Block state prefetcher interface @@ -1559,7 +1561,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. // Reorg data error was nil. // Proceed with further reorg arbitration. - if bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, currentBlock.Number()) { + if bc.IsArtificialFinalityEnabled() && bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, currentBlock.Number()) { d.err = bc.ecbp11355(d.commonBlock.Header(), currentBlock.Header(), block.Header()) } @@ -2198,6 +2200,26 @@ func (bc *BlockChain) getReorgData(oldBlock, newBlock *types.Block) *reorgData { } } +// EnableArtificialFinality enables and disable artifical finality features for the blockchain. +// Currently toggled features include: +// - ECBP11355-MESS: modified exponential subject scoring +// +// This level of activation works BELOW the chain configuration for any of the +// potential features. eg. If ECBP11355 is not activated at the chain config x block number, +// then calling bc.EnableArtificialFinality(true) will be a noop. +// The method may be called idempotently. +func (bc *BlockChain) EnableArtificialFinality(enable bool) { + if enable { + atomic.StoreInt32(&bc.artificialFinalityEnabled, 1) + } else { + atomic.StoreInt32(&bc.artificialFinalityEnabled, 0) + } +} + +func (bc *BlockChain) IsArtificialFinalityEnabled() bool { + return atomic.LoadInt32(&bc.artificialFinalityEnabled) == 1 +} + // errReorgFinality represents an error caused by artificial finality mechanisms. 
var errReorgFinality = errors.New("finality-enforced invalid new chain") diff --git a/eth/sync.go b/eth/sync.go index 26badd1e21..808cf39a2b 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/params/vars" ) const ( @@ -39,11 +40,59 @@ const ( txsyncPackSize = 100 * 1024 ) +var ( + // minArtificialFinalityPeers defines the minimum number of peers our node must be connected + // to in order to enable artificial finality features. + // A minimum number of peer connections mitigates the risk of lower-powered eclipse attacks. + minArtificialFinalityPeers = defaultMinSyncPeers * 2 + + // artificialFinalitySafetyInterval defines the interval at which the local head is checked for staleness. + // If the head is found to be stale across this interval, artificial finality features are disabled. + // This prevents an abandoned victim of an eclipse attack from being forever destitute. + artificialFinalitySafetyInterval = time.Second * time.Duration(10*vars.DurationLimit.Uint64()) +) + type txsync struct { p *peer txs []*types.Transaction } +// artificialFinalitySafetyLoop compares our local head across timer intervals. +// If it changes, assuming the interval is sufficiently long, +// it means we're syncing ok: there has been a steady flow of blocks. +// If it doesn't change, it means that we've stalled syncing for some reason, +// and should disable the permapoint feature in case that's keeping +// us on a dead chain. +func (pm *ProtocolManager) artificialFinalitySafetyLoop() { + t := time.NewTicker(artificialFinalitySafetyInterval) + defer t.Stop() + + var lastHead uint64 + + for { + select { + case <-t.C: + if pm.blockchain.IsArtificialFinalityEnabled() { + // Get the latest header we have. + n := pm.blockchain.CurrentHeader().Number.Uint64() + // If it has changed, we haven't gone stale or dark. + if lastHead != n { + lastHead = n + continue + } + // Else, it hasn't changed, which means we've been at the same + // header for the whole timer interval time. + log.Warn("Disabling artificial finality", "reason", "stale safety interval", "interval", artificialFinalitySafetyInterval) + pm.blockchain.EnableArtificialFinality(false) + } else { + lastHead = 0 // reset + } + case <-pm.quitSync: + return + } + } +} + // syncTransactions starts sending all currently pending transactions to the given peer. func (pm *ProtocolManager) syncTransactions(p *peer) { // Assemble the set of transaction to broadcast or announce to the remote @@ -248,6 +297,12 @@ func (cs *chainSyncer) nextSyncOp() *chainSyncOp { } else if minPeers > cs.pm.maxPeers { minPeers = cs.pm.maxPeers } + if cs.pm.peers.Len() < minArtificialFinalityPeers { + if cs.pm.blockchain.IsArtificialFinalityEnabled() { + log.Warn("Disabling artificial finality", "reason", "low peers", "peers", cs.pm.peers.Len()) + cs.pm.blockchain.EnableArtificialFinality(false) + } + } if cs.pm.peers.Len() < minPeers { return nil } @@ -260,6 +315,13 @@ func (cs *chainSyncer) nextSyncOp() *chainSyncOp { mode, ourTD := cs.modeAndLocalHead() op := peerToSyncOp(mode, peer) if op.td.Cmp(ourTD) <= 0 { + // Enable artificial finality if parameters if should. 
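+			// That is: the best peer's TD is not above ours (we are in sync), so turn
+			// MESS on, but only for full-sync nodes with at least
+			// minArtificialFinalityPeers connected, and only if it is not already enabled.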
+ if op.mode == downloader.FullSync && + cs.pm.peers.Len() >= minArtificialFinalityPeers && + !cs.pm.blockchain.IsArtificialFinalityEnabled() { + log.Info("Enabling artificial finality features", "reason", "synced", "peers", cs.pm.peers.Len()) + cs.pm.blockchain.EnableArtificialFinality(true) + } return nil // We're in sync. } return op From df45f0a4abf8cdcd4d77d713a5da1876adcd027c Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 11:18:32 -0500 Subject: [PATCH 048/105] params: set MESS activation numbers for classic and mordor Signed-off-by: meows --- params/config_classic.go | 1 + params/config_mordor.go | 1 + 2 files changed, 2 insertions(+) diff --git a/params/config_classic.go b/params/config_classic.go index 33cc1c356b..769f6f8c85 100644 --- a/params/config_classic.go +++ b/params/config_classic.go @@ -73,6 +73,7 @@ var ( ECIP1017EraRounds: big.NewInt(5000000), ECIP1010PauseBlock: big.NewInt(3000000), ECIP1010Length: big.NewInt(2000000), + ECBP11355FBlock: big.NewInt(11242400), RequireBlockHashes: map[uint64]common.Hash{ 1920000: common.HexToHash("0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f"), 2500000: common.HexToHash("0xca12c63534f565899681965528d536c52cb05b7c48e269c2a6cb77ad864d878a"), diff --git a/params/config_mordor.go b/params/config_mordor.go index 623a0cb0f8..512298b613 100644 --- a/params/config_mordor.go +++ b/params/config_mordor.go @@ -71,6 +71,7 @@ var ( ECIP1017EraRounds: big.NewInt(2000000), ECIP1010PauseBlock: nil, ECIP1010Length: nil, + ECBP11355FBlock: big.NewInt(2290740), RequireBlockHashes: map[uint64]common.Hash{ 840013: common.HexToHash("0x2ceada2b191879b71a5bcf2241dd9bc50d6d953f1640e62f9c2cee941dc61c9d"), 840014: common.HexToHash("0x8ec29dd692c8985b82410817bac232fc82805b746538d17bc924624fe74a0fcf"), From 379294e2070a1bc40630604941ffed70e242f2af Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 11:47:48 -0500 Subject: [PATCH 049/105] core,eth: refactor af enable/disable for cleaner logs Signed-off-by: meows --- core/blockchain.go | 18 +++++++++++++++--- eth/sync.go | 11 ++++------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 2bb95f6dbd..cffefd8e91 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -2200,20 +2200,32 @@ func (bc *BlockChain) getReorgData(oldBlock, newBlock *types.Block) *reorgData { } } -// EnableArtificialFinality enables and disable artifical finality features for the blockchain. +// EnableArtificialFinality enables and disable artificial finality features for the blockchain. // Currently toggled features include: // - ECBP11355-MESS: modified exponential subject scoring // // This level of activation works BELOW the chain configuration for any of the // potential features. eg. If ECBP11355 is not activated at the chain config x block number, // then calling bc.EnableArtificialFinality(true) will be a noop. -// The method may be called idempotently. -func (bc *BlockChain) EnableArtificialFinality(enable bool) { +// The method is idempotent. +func (bc *BlockChain) EnableArtificialFinality(enable bool, logValues ...interface{}) { + // Store enable/disable value regardless of config activation. 
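+	// The atomic flag is always written; only the log level below depends on
+	// whether ECBP11355 is activated by chain config: Info when activated and
+	// enabling, Warn when activated and disabling, Debug when not activated.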
+ var statusLog string if enable { + statusLog = "Enabled" atomic.StoreInt32(&bc.artificialFinalityEnabled, 1) } else { + statusLog = "Disabled" atomic.StoreInt32(&bc.artificialFinalityEnabled, 0) } + configActivated := bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, bc.CurrentHeader().Number) + logFn := log.Debug // Deactivated + if configActivated && enable { + logFn = log.Info // Activated and enabled + } else if configActivated && !enable { + logFn = log.Warn // Activated and disabled + } + logFn(fmt.Sprintf("%s artificial finality features", statusLog), logValues...) } func (bc *BlockChain) IsArtificialFinalityEnabled() bool { diff --git a/eth/sync.go b/eth/sync.go index 808cf39a2b..343596e09b 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -44,7 +44,7 @@ var ( // minArtificialFinalityPeers defines the minimum number of peers our node must be connected // to in order to enable artificial finality features. // A minimum number of peer connections mitigates the risk of lower-powered eclipse attacks. - minArtificialFinalityPeers = defaultMinSyncPeers * 2 + minArtificialFinalityPeers = defaultMinSyncPeers * 2 // artificialFinalitySafetyInterval defines the interval at which the local head is checked for staleness. // If the head is found to be stale across this interval, artificial finality features are disabled. @@ -82,8 +82,7 @@ func (pm *ProtocolManager) artificialFinalitySafetyLoop() { } // Else, it hasn't changed, which means we've been at the same // header for the whole timer interval time. - log.Warn("Disabling artificial finality", "reason", "stale safety interval", "interval", artificialFinalitySafetyInterval) - pm.blockchain.EnableArtificialFinality(false) + pm.blockchain.EnableArtificialFinality(false, "reason", "stale safety interval", "interval", artificialFinalitySafetyInterval) } else { lastHead = 0 // reset } @@ -299,8 +298,7 @@ func (cs *chainSyncer) nextSyncOp() *chainSyncOp { } if cs.pm.peers.Len() < minArtificialFinalityPeers { if cs.pm.blockchain.IsArtificialFinalityEnabled() { - log.Warn("Disabling artificial finality", "reason", "low peers", "peers", cs.pm.peers.Len()) - cs.pm.blockchain.EnableArtificialFinality(false) + cs.pm.blockchain.EnableArtificialFinality(false, "reason", "low peers", "peers", cs.pm.peers.Len()) } } if cs.pm.peers.Len() < minPeers { @@ -319,8 +317,7 @@ func (cs *chainSyncer) nextSyncOp() *chainSyncOp { if op.mode == downloader.FullSync && cs.pm.peers.Len() >= minArtificialFinalityPeers && !cs.pm.blockchain.IsArtificialFinalityEnabled() { - log.Info("Enabling artificial finality features", "reason", "synced", "peers", cs.pm.peers.Len()) - cs.pm.blockchain.EnableArtificialFinality(true) + cs.pm.blockchain.EnableArtificialFinality(true, "reason", "synced", "peers", cs.pm.peers.Len()) } return nil // We're in sync. } From c050c21742d7e9e148a7dec4849bea50353a3b22 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 11:54:05 -0500 Subject: [PATCH 050/105] core: break af logic to own file This is intended to keep upstream merges streamlined by moving 'custom' logic to a non-colliding space. 
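For reference, the check that moves into blockchain_af.go reduces to a single
inequality between the proposed segment's total-difficulty gain and an
exponential "antigravity" of its age. The sketch below restates that condition
outside the BlockChain type; messAccepts and the package name are hypothetical
and for illustration only (not part of this patch), and it assumes the
base-1.0001 curve used here:

    package mess // hypothetical package, for illustration only

    import (
        "math"
        "math/big"
    )

    // messAccepts reports whether a proposed segment B may replace the current
    // segment A, given the total difficulties at the common ancestor C, at the
    // local head A, and at the proposed head B, plus the seconds elapsed since C.
    // Acceptance condition: (TD_B - TD_C) / (TD_A - TD_C) >= 1.0001^dt.
    func messAccepts(tdCommon, tdLocal, tdProposed *big.Int, dt uint64) bool {
        num := new(big.Float).SetInt(new(big.Int).Sub(tdProposed, tdCommon))
        den := new(big.Float).SetInt(new(big.Int).Sub(tdLocal, tdCommon))
        ratio, _ := new(big.Float).Quo(num, den).Float64()
        return ratio >= math.Pow(1.0001, float64(dt))
    }

The real implementation differs only in that it derives these inputs from
stored headers via GetTd and wraps a rejection in errReorgFinality.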
Signed-off-by: meows --- core/blockchain.go | 65 ++----------------------------------- core/blockchain_af.go | 75 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 63 deletions(-) create mode 100644 core/blockchain_af.go diff --git a/core/blockchain.go b/core/blockchain.go index cffefd8e91..ab4a068946 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "io" - "math" "math/big" mrand "math/rand" "sort" @@ -202,8 +201,6 @@ type BlockChain struct { running int32 // 0 if chain is running, 1 when stopped procInterrupt int32 // interrupt signaler for block processing - artificialFinalityEnabled int32 // toggles artificial finality features - engine consensus.Engine validator Validator // Block and state validator interface prefetcher Prefetcher // Block state prefetcher interface @@ -213,6 +210,8 @@ type BlockChain struct { badBlocks *lru.Cache // Bad block cache shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block. terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion. + + artificialFinalityEnabled int32 // toggles artificial finality features } // NewBlockChain returns a fully initialised block chain using information @@ -2200,66 +2199,6 @@ func (bc *BlockChain) getReorgData(oldBlock, newBlock *types.Block) *reorgData { } } -// EnableArtificialFinality enables and disable artificial finality features for the blockchain. -// Currently toggled features include: -// - ECBP11355-MESS: modified exponential subject scoring -// -// This level of activation works BELOW the chain configuration for any of the -// potential features. eg. If ECBP11355 is not activated at the chain config x block number, -// then calling bc.EnableArtificialFinality(true) will be a noop. -// The method is idempotent. -func (bc *BlockChain) EnableArtificialFinality(enable bool, logValues ...interface{}) { - // Store enable/disable value regardless of config activation. - var statusLog string - if enable { - statusLog = "Enabled" - atomic.StoreInt32(&bc.artificialFinalityEnabled, 1) - } else { - statusLog = "Disabled" - atomic.StoreInt32(&bc.artificialFinalityEnabled, 0) - } - configActivated := bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, bc.CurrentHeader().Number) - logFn := log.Debug // Deactivated - if configActivated && enable { - logFn = log.Info // Activated and enabled - } else if configActivated && !enable { - logFn = log.Warn // Activated and disabled - } - logFn(fmt.Sprintf("%s artificial finality features", statusLog), logValues...) -} - -func (bc *BlockChain) IsArtificialFinalityEnabled() bool { - return atomic.LoadInt32(&bc.artificialFinalityEnabled) == 1 -} - -// errReorgFinality represents an error caused by artificial finality mechanisms. -var errReorgFinality = errors.New("finality-enforced invalid new chain") - -// ecpb11355 implements the "MESS" artificial finality mechanism -// "Modified Exponential Subject Scoring" used to prefer known chain segments -// over later-to-come counterparts, especially proposed segments stretching far into the past. 
-func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) error { - commonAncestorTD := bc.GetTd(commonAncestor.Hash(), commonAncestor.Number.Uint64()) - - proposedParentTD := bc.GetTd(proposed.ParentHash, proposed.Number.Uint64()-1) - proposedTD := new(big.Int).Add(proposed.Difficulty, proposedParentTD) - - localTD := bc.GetTd(current.Hash(), current.Number.Uint64()) - - tdRatio, _ := new(big.Float).Quo( - new(big.Float).SetInt(new(big.Int).Sub(proposedTD, commonAncestorTD)), - new(big.Float).SetInt(new(big.Int).Sub(localTD, commonAncestorTD)), - ).Float64() - - antiGravity := math.Pow(1.0001, float64(proposed.Time-commonAncestor.Time)) - - if tdRatio < antiGravity { - // Using "b/a" here as "'B' chain vs. 'A' chain", where A is original (current), and B is proposed (new). - return fmt.Errorf("%w: ECPB11355-MESS: td.b/a(%0.3f) < antigravity(%0.3f)", errReorgFinality, tdRatio, antiGravity) - } - return nil -} - // reorg takes two blocks, an old chain and a new chain and will reconstruct the // blocks and inserts them to be part of the new canonical chain and accumulates // potential missing transactions and post an event about them. diff --git a/core/blockchain_af.go b/core/blockchain_af.go new file mode 100644 index 0000000000..181fd101b5 --- /dev/null +++ b/core/blockchain_af.go @@ -0,0 +1,75 @@ +package core + +import ( + "errors" + "fmt" + "math" + "math/big" + "sync/atomic" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +// errReorgFinality represents an error caused by artificial finality mechanisms. +var errReorgFinality = errors.New("finality-enforced invalid new chain") + +// EnableArtificialFinality enables and disable artificial finality features for the blockchain. +// Currently toggled features include: +// - ECBP11355-MESS: modified exponential subject scoring +// +// This level of activation works BELOW the chain configuration for any of the +// potential features. eg. If ECBP11355 is not activated at the chain config x block number, +// then calling bc.EnableArtificialFinality(true) will be a noop. +// The method is idempotent. +func (bc *BlockChain) EnableArtificialFinality(enable bool, logValues ...interface{}) { + // Store enable/disable value regardless of config activation. + var statusLog string + if enable { + statusLog = "Enabled" + atomic.StoreInt32(&bc.artificialFinalityEnabled, 1) + } else { + statusLog = "Disabled" + atomic.StoreInt32(&bc.artificialFinalityEnabled, 0) + } + configActivated := bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, bc.CurrentHeader().Number) + logFn := log.Debug // Deactivated + if configActivated && enable { + logFn = log.Info // Activated and enabled + } else if configActivated && !enable { + logFn = log.Warn // Activated and disabled + } + logFn(fmt.Sprintf("%s artificial finality features", statusLog), logValues...) +} + +// IsArtificialFinalityEnabled returns the status of the blockchain's artificial +// finality feature setting. +// This status is agnostic of feature activation by chain configuration. +func (bc *BlockChain) IsArtificialFinalityEnabled() bool { + return atomic.LoadInt32(&bc.artificialFinalityEnabled) == 1 +} + +// ecpb11355 implements the "MESS" artificial finality mechanism +// "Modified Exponential Subject Scoring" used to prefer known chain segments +// over later-to-come counterparts, especially proposed segments stretching far into the past. 
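+// With the base-1.0001 curve the required advantage grows with the age of the
+// common ancestor: for example, a fork point one hour old requires the proposed
+// segment to carry roughly 1.43x (1.0001^3600) the difficulty the local chain
+// has accumulated since then.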
+func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) error { + commonAncestorTD := bc.GetTd(commonAncestor.Hash(), commonAncestor.Number.Uint64()) + + proposedParentTD := bc.GetTd(proposed.ParentHash, proposed.Number.Uint64()-1) + proposedTD := new(big.Int).Add(proposed.Difficulty, proposedParentTD) + + localTD := bc.GetTd(current.Hash(), current.Number.Uint64()) + + tdRatio, _ := new(big.Float).Quo( + new(big.Float).SetInt(new(big.Int).Sub(proposedTD, commonAncestorTD)), + new(big.Float).SetInt(new(big.Int).Sub(localTD, commonAncestorTD)), + ).Float64() + + antiGravity := math.Pow(1.0001, float64(proposed.Time-commonAncestor.Time)) + + if tdRatio < antiGravity { + // Using "b/a" here as "'B' chain vs. 'A' chain", where A is original (current), and B is proposed (new). + return fmt.Errorf("%w: ECPB11355-MESS: td.b/a(%0.3f) < antigravity(%0.3f)", errReorgFinality, tdRatio, antiGravity) + } + return nil +} From 2d2c9796e0a68f87b62371c0c0be8ae18c4dbdb2 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 11:55:21 -0500 Subject: [PATCH 051/105] :nail_care: --- core/blockchain_af.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index 181fd101b5..0d12b9cedd 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -50,7 +50,7 @@ func (bc *BlockChain) IsArtificialFinalityEnabled() bool { } // ecpb11355 implements the "MESS" artificial finality mechanism -// "Modified Exponential Subject Scoring" used to prefer known chain segments +// "Modified Exponential Subjective Scoring" used to prefer known chain segments // over later-to-come counterparts, especially proposed segments stretching far into the past. func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) error { commonAncestorTD := bc.GetTd(commonAncestor.Hash(), commonAncestor.Number.Uint64()) From 9101fe5cab982a55dc00f20bf52b82b47c48c5b8 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 13:39:39 -0500 Subject: [PATCH 052/105] params: default MessNet chain config and genesis Signed-off-by: meows --- params/config_classic.go | 74 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/params/config_classic.go b/params/config_classic.go index 769f6f8c85..6a28988344 100644 --- a/params/config_classic.go +++ b/params/config_classic.go @@ -19,8 +19,10 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/params/types/coregeth" "github.com/ethereum/go-ethereum/params/types/ctypes" + "github.com/ethereum/go-ethereum/params/types/genesisT" ) var ( @@ -83,4 +85,76 @@ var ( DisinflationRateQuotient = big.NewInt(4) // Disinflation rate quotient for ECIP1017 DisinflationRateDivisor = big.NewInt(5) // Disinflation rate divisor for ECIP1017 ExpDiffPeriod = big.NewInt(100000) // Exponential diff period for diff bomb & ECIP1010 + + MessNetConfig = &coregeth.CoreGethChainConfig{ + NetworkID: 1, + Ethash: new(ctypes.EthashConfig), + ChainID: big.NewInt(6161), + + EIP2FBlock: big.NewInt(1), + EIP7FBlock: big.NewInt(1), + + DAOForkBlock: nil, + + EIP150Block: big.NewInt(2), + + EIP155Block: big.NewInt(3), + EIP160FBlock: big.NewInt(3), + + // EIP158~ + EIP161FBlock: big.NewInt(8), + EIP170FBlock: big.NewInt(8), + + // Byzantium eq + EIP100FBlock: big.NewInt(8), + EIP140FBlock: big.NewInt(8), + EIP198FBlock: big.NewInt(8), + EIP211FBlock: big.NewInt(8), + EIP212FBlock: big.NewInt(8), + 
EIP213FBlock: big.NewInt(8), + EIP214FBlock: big.NewInt(8), + EIP658FBlock: big.NewInt(8), + + // Constantinople eq, aka Agharta + EIP145FBlock: big.NewInt(9), + EIP1014FBlock: big.NewInt(9), + EIP1052FBlock: big.NewInt(9), + + // Istanbul eq, aka Phoenix + // ECIP-1088 + EIP152FBlock: big.NewInt(10), + EIP1108FBlock: big.NewInt(10), + EIP1344FBlock: big.NewInt(10), + EIP1884FBlock: big.NewInt(10), + EIP2028FBlock: big.NewInt(10), + EIP2200FBlock: big.NewInt(10), // RePetersburg (=~ re-1283) + + DisposalBlock: big.NewInt(5), + ECIP1017FBlock: big.NewInt(5), + ECIP1017EraRounds: big.NewInt(5000), + ECIP1010PauseBlock: big.NewInt(3), + ECIP1010Length: big.NewInt(2), + ECBP11355FBlock: big.NewInt(11), + } + ) + +func DefaultMessNetGenesisBlock() *genesisT.Genesis { + return &genesisT.Genesis{ + Config: MessNetConfig, + Timestamp: 1598650845, + ExtraData: hexutil.MustDecode("0x4235353535353535353535353535353535353535353535353535353535353535"), + GasLimit: 10485760, + Difficulty: big.NewInt(37103392657464), + Alloc: map[common.Address]genesisT.GenesisAccount{ + common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover + common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256 + common.BytesToAddress([]byte{3}): {Balance: big.NewInt(1)}, // RIPEMD + common.BytesToAddress([]byte{4}): {Balance: big.NewInt(1)}, // Identity + common.BytesToAddress([]byte{5}): {Balance: big.NewInt(1)}, // ModExp + common.BytesToAddress([]byte{6}): {Balance: big.NewInt(1)}, // ECAdd + common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul + common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing + }, + } +} From 295c436003cc94c8e9d9069e3b611756fd64145a Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 13:40:32 -0500 Subject: [PATCH 053/105] core: increase logging precision for MESS outcome Should help with debugging if it comes to that. 4 decimals chosen because that matches the current base value precision: 1.0001 Signed-off-by: meows --- core/blockchain_af.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index 0d12b9cedd..01781eb895 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -69,7 +69,7 @@ func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) if tdRatio < antiGravity { // Using "b/a" here as "'B' chain vs. 'A' chain", where A is original (current), and B is proposed (new). 
- return fmt.Errorf("%w: ECPB11355-MESS: td.b/a(%0.3f) < antigravity(%0.3f)", errReorgFinality, tdRatio, antiGravity) + return fmt.Errorf("%w: ECPB11355-MESS: td.b/a(%0.4f) < antigravity(%0.4f)", errReorgFinality, tdRatio, antiGravity) } return nil } From ec27ad45a2507d42b4f56e089c51c31e91813efd Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 13:41:18 -0500 Subject: [PATCH 054/105] core: case-based tests for easy/hard MESS chain acceptance Signed-off-by: meows --- core/blockchain_af_test.go | 95 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 core/blockchain_af_test.go diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go new file mode 100644 index 0000000000..ba67d1d59c --- /dev/null +++ b/core/blockchain_af_test.go @@ -0,0 +1,95 @@ +package core + +import ( + "math" + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" +) + +func TestBlockChain_AF_ECBP11355(t *testing.T) { + + cases := []struct{ + easyLen, hardLen, commonAncestorN int + easyOffset, hardOffset int64 + hardGetsHead, accepted bool + }{ + // Hard has insufficient total difficulty / length. + { + 5000, 7500, 2500, + 60, 1, + false, false, + }, + // Hard has insufficient total difficulty / length. + { + 1000, 7, 995, + 60, 9, + false, false, + }, + // Hard has sufficient total difficulty / length to be accepted and set as head. + { + 1000, 7, 995, + 60, 7, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 1000, 1, 999, + 30, 1, + true, true, + }, + // Hard is accepted, but does not have greater total difficulty, + // and is not set as the chain head. 
+ { + 1000, 1, 900, + 60, 1, + false, true, + }, + } + + runTest := func(easyL, hardL, caN int, easyT, hardT int64) (hardHead bool, err error) { + // Generate the original common chain segment and the two competing forks + engine := ethash.NewFaker() + + db := rawdb.NewMemoryDatabase() + genesis := params.DefaultMessNetGenesisBlock() + genesisB := MustCommitGenesis(db, genesis) + + chain, err := NewBlockChain(db, nil, genesis.Config, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatal(err) + } + defer chain.Stop() + chain.EnableArtificialFinality(true) + + easy, _ := GenerateChain(genesis.Config, genesisB, engine, db, easyL, func(i int, b *BlockGen) { + b.SetNonce(types.EncodeNonce(uint64(rand.Int63n(math.MaxInt64)))) + b.OffsetTime(easyT) + }) + commonAncestor := easy[caN-1] + hard, _ := GenerateChain(genesis.Config, commonAncestor, engine, db, hardL, func(i int, b *BlockGen) { + b.SetNonce(types.EncodeNonce(uint64(rand.Int63n(math.MaxInt64)))) + b.OffsetTime(hardT) + }) + + if _, err := chain.InsertChain(easy); err != nil { + t.Fatal(err) + } + _, err = chain.InsertChain(hard) + hardHead = chain.CurrentBlock().Hash() == hard[len(hard)-1].Hash() + return + } + + for i, c := range cases { + hardHead, err := runTest(c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset) + if (err != nil && c.accepted) || (err == nil && !c.accepted) || (hardHead != c.hardGetsHead) { + t.Errorf("case=%d want.accepted=%v want.hardHead=%v got.hardHead=%v err=%v", + i, c.accepted, c.hardGetsHead, hardHead, err) + } + } +} From 2fa50564d8455c96f0fb77496e4124230e14daf8 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 14:08:34 -0500 Subject: [PATCH 055/105] eth: actually install artificialFinalitySafetyLoop goroutine Forgot to actually plug this in. Signed-off-by: meows --- eth/handler.go | 4 ++++ eth/sync.go | 2 ++ 2 files changed, 6 insertions(+) diff --git a/eth/handler.go b/eth/handler.go index 0b300f5d9a..6e8a0ed8cb 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -267,6 +267,10 @@ func (pm *ProtocolManager) Start(maxPeers int) { pm.wg.Add(2) go pm.chainSync.loop() go pm.txsyncLoop64() // TODO(karalabe): Legacy initial tx echange, drop with eth/64. + + // start artificial finality safety loop + pm.wg.Add(1) + go pm.artificialFinalitySafetyLoop() } func (pm *ProtocolManager) Stop() { diff --git a/eth/sync.go b/eth/sync.go index 343596e09b..da9456d8c9 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -64,6 +64,8 @@ type txsync struct { // and should disable the permapoint feature in case that's keeping // us on a dead chain. 
func (pm *ProtocolManager) artificialFinalitySafetyLoop() { + defer pm.wg.Done() + t := time.NewTicker(artificialFinalitySafetyInterval) defer t.Stop() From ae203d252bb97f2956fcbb834f766c671561823c Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 14:11:14 -0500 Subject: [PATCH 056/105] params: add comments on ETA for classic and mordor MESS activations Signed-off-by: meows --- params/config_classic.go | 2 +- params/config_mordor.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/params/config_classic.go b/params/config_classic.go index 6a28988344..741d40d550 100644 --- a/params/config_classic.go +++ b/params/config_classic.go @@ -75,7 +75,7 @@ var ( ECIP1017EraRounds: big.NewInt(5000000), ECIP1010PauseBlock: big.NewInt(3000000), ECIP1010Length: big.NewInt(2000000), - ECBP11355FBlock: big.NewInt(11242400), + ECBP11355FBlock: big.NewInt(11242400), // ETA 20 Sept 2020, ~1500 UTC RequireBlockHashes: map[uint64]common.Hash{ 1920000: common.HexToHash("0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f"), 2500000: common.HexToHash("0xca12c63534f565899681965528d536c52cb05b7c48e269c2a6cb77ad864d878a"), diff --git a/params/config_mordor.go b/params/config_mordor.go index 512298b613..34ecde70d6 100644 --- a/params/config_mordor.go +++ b/params/config_mordor.go @@ -71,7 +71,7 @@ var ( ECIP1017EraRounds: big.NewInt(2000000), ECIP1010PauseBlock: nil, ECIP1010Length: nil, - ECBP11355FBlock: big.NewInt(2290740), + ECBP11355FBlock: big.NewInt(2290740), // ETA 15 Sept 2020, ~1500 UTC RequireBlockHashes: map[uint64]common.Hash{ 840013: common.HexToHash("0x2ceada2b191879b71a5bcf2241dd9bc50d6d953f1640e62f9c2cee941dc61c9d"), 840014: common.HexToHash("0x8ec29dd692c8985b82410817bac232fc82805b746538d17bc924624fe74a0fcf"), From 88d5439f53838e0a5b4c16972cace7d5106eb396 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 14:46:53 -0500 Subject: [PATCH 057/105] core: increase MESS log precision to 5 One order of magnitude greater than the exponent's base of 1.0001. Signed-off-by: meows --- core/blockchain_af.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index 01781eb895..56946e099d 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -69,7 +69,7 @@ func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) if tdRatio < antiGravity { // Using "b/a" here as "'B' chain vs. 'A' chain", where A is original (current), and B is proposed (new). - return fmt.Errorf("%w: ECPB11355-MESS: td.b/a(%0.4f) < antigravity(%0.4f)", errReorgFinality, tdRatio, antiGravity) + return fmt.Errorf("%w: ECPB11355-MESS: td.b/a(%0.5f) < antigravity(%0.5f)", errReorgFinality, tdRatio, antiGravity) } return nil } From 84b87dd49ca784021eeb068c3c356f34006e4f52 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 14:47:14 -0500 Subject: [PATCH 058/105] core: add more test cases showing approaching limits Signed-off-by: meows --- core/blockchain_af_test.go | 140 +++++++++++++++++++++++++++++++++++-- 1 file changed, 133 insertions(+), 7 deletions(-) diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index ba67d1d59c..c61b465075 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -19,19 +19,19 @@ func TestBlockChain_AF_ECBP11355(t *testing.T) { easyOffset, hardOffset int64 hardGetsHead, accepted bool }{ - // Hard has insufficient total difficulty / length. + // Hard has insufficient total difficulty / length and is rejected. 
{ 5000, 7500, 2500, - 60, 1, + 50, -9, false, false, }, - // Hard has insufficient total difficulty / length. + // Hard has sufficient total difficulty / length and is accepted. { 1000, 7, 995, - 60, 9, - false, false, + 60, 0, + true, true, }, - // Hard has sufficient total difficulty / length to be accepted and set as head. + // Hard has sufficient total difficulty / length and is accepted. { 1000, 7, 995, 60, 7, @@ -43,11 +43,137 @@ func TestBlockChain_AF_ECBP11355(t *testing.T) { 30, 1, true, true, }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 3, 497, + 0, -8, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 4, 496, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 5, 495, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 6, 494, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 7, 493, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 8, 492, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 9, 491, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 12, 488, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 20, 480, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 40, 460, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 60, 440, + 0, -9, + true, true, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 80, 420, + 0, -9, + false, false, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 80, 420, + 7, -9, + false, false, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 80, 420, + 17, -9, + false, false, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 80, 420, + 47, -9, + true, true, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 80, 420, + 47, -8, + false, false, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 80, 420, + 17, -8, + false, false, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 80, 420, + 7, -8, + false, false, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 80, 420, + 0, -8, + false, false, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 40, 460, + 0, -7, + false, false, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 14, 486, + 0, -7, + false, false, + }, // Hard is accepted, but does not have greater total difficulty, // and is not set as the chain head. 
{ 1000, 1, 900, - 60, 1, + 60, -9, false, true, }, } From 0268698d7ea43429800b4d308e703cdc80ee503b Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 10 Sep 2020 14:49:04 -0500 Subject: [PATCH 059/105] core,params: (lint): goimports -w Signed-off-by: meows --- core/blockchain_af_test.go | 6 +++--- params/config_classic.go | 1 - 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index c61b465075..e6ba406e9e 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -14,10 +14,10 @@ import ( func TestBlockChain_AF_ECBP11355(t *testing.T) { - cases := []struct{ + cases := []struct { easyLen, hardLen, commonAncestorN int - easyOffset, hardOffset int64 - hardGetsHead, accepted bool + easyOffset, hardOffset int64 + hardGetsHead, accepted bool }{ // Hard has insufficient total difficulty / length and is rejected. { diff --git a/params/config_classic.go b/params/config_classic.go index 741d40d550..f6daf5a19c 100644 --- a/params/config_classic.go +++ b/params/config_classic.go @@ -136,7 +136,6 @@ var ( ECIP1010Length: big.NewInt(2), ECBP11355FBlock: big.NewInt(11), } - ) func DefaultMessNetGenesisBlock() *genesisT.Genesis { From a34bf535167d1a930b5e3bd51ec12f92a0480a51 Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 11 Sep 2020 10:07:14 -0500 Subject: [PATCH 060/105] core: refactor reorg logic in writeBlockWithState Signed-off-by: meows --- core/blockchain.go | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index ab4a068946..1ac201d4bd 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1543,35 +1543,29 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5) } } - status = CanonStatTy - discontiguousBlocks := block.ParentHash() != currentBlock.Hash() - reorg = reorg && discontiguousBlocks if reorg { - // Reorganise the chain if the parent is not the head block - d := bc.getReorgData(currentBlock, block) - if d.err != nil { - // Will ALWAYS return the/an error, since the data.err field is non-nil. + if block.ParentHash() != currentBlock.Hash() { + // Reorganise the chain if the parent is not the head block + d := bc.getReorgData(currentBlock, block) + if d.err == nil { + // Reorg data error was nil. + // Proceed with further reorg arbitration. + if bc.IsArtificialFinalityEnabled() && bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, currentBlock.Number()) { + d.err = bc.ecbp11355(d.commonBlock.Header(), currentBlock.Header(), block.Header()) + } + } // We leave the error to the reorg method to handle, if it wants to wrap it or log it or whatever. if err := bc.reorg(d); err != nil { return NonStatTy, err } } - - // Reorg data error was nil. - // Proceed with further reorg arbitration. - if bc.IsArtificialFinalityEnabled() && bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, currentBlock.Number()) { - d.err = bc.ecbp11355(d.commonBlock.Header(), currentBlock.Header(), block.Header()) - } - - if err := bc.reorg(d); err != nil { - return NonStatTy, err - } - // Status is (remains) canon; reorg succeeded. - - } else if discontiguousBlocks { + // Status is canon; reorg succeeded. + status = CanonStatTy + } else { status = SideStatTy } + // Set new head. 
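+	// Only canonical blocks (head extensions and successful reorgs) update the
+	// head below; side-chain blocks (SideStatTy) are stored without moving it.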
if status == CanonStatTy { bc.writeHeadBlock(block) @@ -2203,6 +2197,8 @@ func (bc *BlockChain) getReorgData(oldBlock, newBlock *types.Block) *reorgData { // blocks and inserts them to be part of the new canonical chain and accumulates // potential missing transactions and post an event about them. // If reorgData passed contains an a non-nil error, the method is expect to return it immediately. +// reorgData is NOT expected to ever return an error of its own, since reorg arbitration +// should happen externally. // This kind-of-strange pattern is in place to allow the function to issue "special case" warning logs // consistent with its behavior prior to refactoring. func (bc *BlockChain) reorg(data *reorgData) error { From 905564e35f7d2c2201f748d409145612bee17efd Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 11 Sep 2020 10:08:49 -0500 Subject: [PATCH 061/105] core: refactor and document fn alternatives, select sinusoidal The sinusoidal function seems the most desirable. This changeset also includes a shallow threshold of 3 blocks, returning positive acceptance with difficulty is equivalent. This is intended to sidestep negative impacts on uncle rates. Signed-off-by: meows --- core/blockchain_af.go | 87 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 85 insertions(+), 2 deletions(-) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index 56946e099d..4d36980883 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -5,7 +5,9 @@ import ( "fmt" "math" "math/big" + "math/rand" "sync/atomic" + "time" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" @@ -14,6 +16,10 @@ import ( // errReorgFinality represents an error caused by artificial finality mechanisms. var errReorgFinality = errors.New("finality-enforced invalid new chain") +func init() { + rand.Seed(time.Now().UnixNano()) +} + // EnableArtificialFinality enables and disable artificial finality features for the blockchain. // Currently toggled features include: // - ECBP11355-MESS: modified exponential subject scoring @@ -53,6 +59,8 @@ func (bc *BlockChain) IsArtificialFinalityEnabled() bool { // "Modified Exponential Subjective Scoring" used to prefer known chain segments // over later-to-come counterparts, especially proposed segments stretching far into the past. func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) error { + + // Get the total difficulty ratio of the proposed chain segment over the existing one. commonAncestorTD := bc.GetTd(commonAncestor.Hash(), commonAncestor.Number.Uint64()) proposedParentTD := bc.GetTd(proposed.ParentHash, proposed.Number.Uint64()-1) @@ -65,11 +73,86 @@ func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) new(big.Float).SetInt(new(big.Int).Sub(localTD, commonAncestorTD)), ).Float64() - antiGravity := math.Pow(1.0001, float64(proposed.Time-commonAncestor.Time)) + // See comment at ecbp11355AGSameSameShallowOK. + // If this condition runs, the reorg check will already have + // passed a coin toss (combined with selfish preference) for acceptance. + if tdRatio == 1 && + proposed.Number.Uint64() - current.Number.Uint64() <= 3 { + return nil + } + + x := float64(proposed.Time-commonAncestor.Time) + antiGravity := ecbp11355AGSinusoidalA(x) if tdRatio < antiGravity { // Using "b/a" here as "'B' chain vs. 'A' chain", where A is original (current), and B is proposed (new). 
- return fmt.Errorf("%w: ECPB11355-MESS: td.b/a(%0.5f) < antigravity(%0.5f)", errReorgFinality, tdRatio, antiGravity) + underpoweredBy := tdRatio / antiGravity + return fmt.Errorf("%w: ECPB11355-MESS: td.B/A%0.6f < antigravity%0.6f (under=%0.6f)", errReorgFinality, tdRatio, antiGravity, underpoweredBy) } return nil } + +// ecbp11355AGSameSameShallowOK is an allowance arbitration function for chain segments +// of equal total difficulty using probability weighted toward short segments. +// Removing the probability and simply using a short cap may also work fine. +// If the unmoderated MESS algorithm turns out to generate an undesirable uncle rate, +// this may be a good solution. +func ecbp11355AGSameSameShallowOK(tdRatio float64, proposed, commonAncestor *types.Header) bool { + if tdRatio == 1 { + // If the segment is short and TD ratio is 1, make acceptance a probability, + // weighting toward short segments. + length := float64(proposed.Number.Uint64() - commonAncestor.Number.Uint64()) + if length <= 4 { + r := 1 / (length + 1) + if rand.Float64() < r { + return true + } + } + } + return false +} + +/* +ecbp11355AGSinusoidalA is a sinusoidal function. + +OPTION 3: Yet slower takeoff, yet steeper eventual ascent. Has a differentiable ceiling. +h(x)=15 sin((x+12000 π)/(8000))+15+1 + +*/ +func ecbp11355AGSinusoidalA(x float64) (antiGravity float64) { + ampl := float64(15) // amplitude + pDiv := float64(8000) // period divisor + phaseShift := math.Pi * (pDiv*1.5) + peakX := math.Pi * pDiv // x value of first sin peak where x > 0 + if x > peakX { + // Cause the x value to limit to the x value of the first peak of the sin wave (ceiling). + x = peakX + } + return (ampl * math.Sin((x+phaseShift)/pDiv)) + ampl + 1 +} + +/* +ecbp11355AGExpB is an exponential function with x as a base (and rationalized exponent). + +OPTION 2: Slightly slower takeoff, steeper eventual ascent +g(x)=x^(x*0.00002) + */ +func ecbp11355AGExpB(x float64) (antiGravity float64) { + return math.Pow(x, x*0.00002) +} + +/* +ecbp11355AGExpA is an exponential function with x as exponent. + +This was (one of?) Vitalik's "original" specs: +> 1.0001 ** (number of seconds between when S1 was received and when S2 was received) +- https://bitcointalk.org/index.php?topic=865169.msg16349234#msg16349234 +> gravity(B') = gravity(B) * 0.99 ^ n +- https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/ + +OPTION 1 (Original ESS) +f(x)=1.0001^(x) +*/ +func ecbp11355AGExpA(x float64) (antiGravity float64) { + return math.Pow(1.0001, x) +} From 26047a53408e1b4be0d387e79a1c9fdf967e3c0b Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 11 Sep 2020 10:24:00 -0500 Subject: [PATCH 062/105] core,go.mod,go.sum: include test that makes graphs, and remove same same shallow Signed-off-by: meows --- core/blockchain_af.go | 8 -- core/blockchain_af_test.go | 187 +++++++++++++++++++++++++++---------- go.mod | 1 + go.sum | 43 +++++++++ 4 files changed, 184 insertions(+), 55 deletions(-) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index 4d36980883..71c8b4ca09 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -73,14 +73,6 @@ func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) new(big.Float).SetInt(new(big.Int).Sub(localTD, commonAncestorTD)), ).Float64() - // See comment at ecbp11355AGSameSameShallowOK. - // If this condition runs, the reorg check will already have - // passed a coin toss (combined with selfish preference) for acceptance. 
- if tdRatio == 1 && - proposed.Number.Uint64() - current.Number.Uint64() <= 3 { - return nil - } - x := float64(proposed.Time-commonAncestor.Time) antiGravity := ecbp11355AGSinusoidalA(x) diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index e6ba406e9e..3ce34618f4 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -1,6 +1,9 @@ package core import ( + "fmt" + "image/color" + "log" "math" "math/rand" "testing" @@ -10,9 +13,52 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" + "gonum.org/v1/plot" + "gonum.org/v1/plot/plotter" + "gonum.org/v1/plot/vg" + "gonum.org/v1/plot/vg/draw" ) +var yuckyGlobalTestEnableMess = false + +func runMESSTest(t *testing.T, easyL, hardL, caN int, easyT, hardT int64) (hardHead bool, err error) { + // Generate the original common chain segment and the two competing forks + engine := ethash.NewFaker() + + db := rawdb.NewMemoryDatabase() + genesis := params.DefaultMessNetGenesisBlock() + genesisB := MustCommitGenesis(db, genesis) + + chain, err := NewBlockChain(db, nil, genesis.Config, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatal(err) + } + defer chain.Stop() + chain.EnableArtificialFinality(yuckyGlobalTestEnableMess) + + easy, _ := GenerateChain(genesis.Config, genesisB, engine, db, easyL, func(i int, b *BlockGen) { + b.SetNonce(types.EncodeNonce(uint64(rand.Int63n(math.MaxInt64)))) + b.OffsetTime(easyT) + }) + commonAncestor := easy[caN-1] + hard, _ := GenerateChain(genesis.Config, commonAncestor, engine, db, hardL, func(i int, b *BlockGen) { + b.SetNonce(types.EncodeNonce(uint64(rand.Int63n(math.MaxInt64)))) + b.OffsetTime(hardT) + }) + + if _, err := chain.InsertChain(easy); err != nil { + t.Fatal(err) + } + _, err = chain.InsertChain(hard) + hardHead = chain.CurrentBlock().Hash() == hard[len(hard)-1].Hash() + return +} + func TestBlockChain_AF_ECBP11355(t *testing.T) { + yuckyGlobalTestEnableMess = true + defer func() { + yuckyGlobalTestEnableMess = false + }() cases := []struct { easyLen, hardLen, commonAncestorN int @@ -111,61 +157,55 @@ func TestBlockChain_AF_ECBP11355(t *testing.T) { }, // Hard has insufficient total difficulty / length and is rejected. { - 500, 80, 420, + 500, 200, 300, 0, -9, false, false, }, // Hard has insufficient total difficulty / length and is rejected. { - 500, 80, 420, + 500, 200, 300, 7, -9, false, false, }, // Hard has insufficient total difficulty / length and is rejected. { - 500, 80, 420, + 500, 200, 300, 17, -9, false, false, }, // Hard has sufficient total difficulty / length and is accepted. { - 500, 80, 420, + 500, 200, 300, 47, -9, true, true, }, // Hard has insufficient total difficulty / length and is rejected. { - 500, 80, 420, + 500, 200, 300, 47, -8, false, false, }, // Hard has insufficient total difficulty / length and is rejected. { - 500, 80, 420, + 500, 200, 300, 17, -8, false, false, }, // Hard has insufficient total difficulty / length and is rejected. { - 500, 80, 420, + 500, 200, 300, 7, -8, false, false, }, // Hard has insufficient total difficulty / length and is rejected. { - 500, 80, 420, + 500, 200, 300, 0, -8, false, false, }, // Hard has insufficient total difficulty / length and is rejected. { - 500, 40, 460, - 0, -7, - false, false, - }, - // Hard has insufficient total difficulty / length and is rejected. 
- { - 500, 14, 486, + 500, 100, 400, 0, -7, false, false, }, @@ -176,46 +216,99 @@ func TestBlockChain_AF_ECBP11355(t *testing.T) { 60, -9, false, true, }, + // Hard is shorter, but sufficiently heavier chain, is accepted. + { + 500, 100, 390, + 60, -9, + true, true, + }, } - runTest := func(easyL, hardL, caN int, easyT, hardT int64) (hardHead bool, err error) { - // Generate the original common chain segment and the two competing forks - engine := ethash.NewFaker() + for i, c := range cases { + hardHead, err := runMESSTest(t, c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset) + if (err != nil && c.accepted) || (err == nil && !c.accepted) || (hardHead != c.hardGetsHead) { + t.Errorf("case=%d [easy=%d hard=%d ca=%d eo=%d ho=%d] want.accepted=%v want.hardHead=%v got.hardHead=%v err=%v", + i, + c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset, + c.accepted, c.hardGetsHead, hardHead, err) + } + } +} - db := rawdb.NewMemoryDatabase() - genesis := params.DefaultMessNetGenesisBlock() - genesisB := MustCommitGenesis(db, genesis) +func TestBlockChain_GenerateMESSPlot(t *testing.T) { + t.Skip("Test plots graph of chain acceptance for visualization.") - chain, err := NewBlockChain(db, nil, genesis.Config, engine, vm.Config{}, nil, nil) + easyLen := 200 + maxHardLen := 100 + + generatePlot := func(title, fileName string) { + p, err := plot.New() if err != nil { - t.Fatal(err) + log.Panic(err) } - defer chain.Stop() - chain.EnableArtificialFinality(true) - - easy, _ := GenerateChain(genesis.Config, genesisB, engine, db, easyL, func(i int, b *BlockGen) { - b.SetNonce(types.EncodeNonce(uint64(rand.Int63n(math.MaxInt64)))) - b.OffsetTime(easyT) - }) - commonAncestor := easy[caN-1] - hard, _ := GenerateChain(genesis.Config, commonAncestor, engine, db, hardL, func(i int, b *BlockGen) { - b.SetNonce(types.EncodeNonce(uint64(rand.Int63n(math.MaxInt64)))) - b.OffsetTime(hardT) - }) - - if _, err := chain.InsertChain(easy); err != nil { - t.Fatal(err) + p.Title.Text = title + p.X.Label.Text = "Block Depth" + p.Y.Label.Text = "Relative Block Time Delta (10 seconds + y)" + + accepteds := plotter.XYs{} + rejecteds := plotter.XYs{} + sides := plotter.XYs{} + + for i := 1; i <= maxHardLen; i++ { + for j := -9; j <= 8; j++ { + fmt.Println("running", i, j) + hardHead, err := runMESSTest(t, easyLen, i, easyLen-i, 0, int64(j)) + point := plotter.XY{X: float64(i), Y: float64(j)} + if err == nil && hardHead { + accepteds = append(accepteds, point) + } else if err == nil && !hardHead { + sides = append(sides, point) + } else if err != nil { + rejecteds = append(rejecteds, point) + } + + if err != nil { + t.Log(err) + } + } } - _, err = chain.InsertChain(hard) - hardHead = chain.CurrentBlock().Hash() == hard[len(hard)-1].Hash() - return - } - for i, c := range cases { - hardHead, err := runTest(c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset) - if (err != nil && c.accepted) || (err == nil && !c.accepted) || (hardHead != c.hardGetsHead) { - t.Errorf("case=%d want.accepted=%v want.hardHead=%v got.hardHead=%v err=%v", - i, c.accepted, c.hardGetsHead, hardHead, err) + scatterAccept, _ := plotter.NewScatter(accepteds) + scatterReject, _ := plotter.NewScatter(rejecteds) + scatterSide, _ := plotter.NewScatter(sides) + + pixelWidth := vg.Length(1000) + + scatterAccept.Color = color.RGBA{R: 152, G: 236, B: 161, A: 255} + scatterAccept.Shape = draw.BoxGlyph{} + scatterAccept.Radius = vg.Length((float64(pixelWidth) / float64(maxHardLen)) * 2 / 3) + scatterReject.Color = 
color.RGBA{R: 236, G: 106, B: 94, A: 255} + scatterReject.Shape = draw.BoxGlyph{} + scatterReject.Radius = vg.Length((float64(pixelWidth) / float64(maxHardLen)) * 2 / 3) + scatterSide.Color = color.RGBA{R: 190, G: 197, B: 236, A: 255} + scatterSide.Shape = draw.BoxGlyph{} + scatterSide.Radius = vg.Length((float64(pixelWidth) / float64(maxHardLen)) * 2 / 3) + + p.Add(scatterAccept) + p.Legend.Add("Accepted", scatterAccept) + p.Add(scatterReject) + p.Legend.Add("Rejected", scatterReject) + p.Add(scatterSide) + p.Legend.Add("Sidechained", scatterSide) + + p.Legend.YOffs = -30 + + err = p.Save(pixelWidth, 300, fileName) + if err != nil { + log.Panic(err) } } + yuckyGlobalTestEnableMess = true + defer func() { + yuckyGlobalTestEnableMess = false + }() + baseTitle := fmt.Sprintf("Accept/Reject Reorgs: Relative Time (Difficulty) over Proposed Segment Length (%d-block original chain)", easyLen) + generatePlot(baseTitle, "reorgs-MESS.png") + yuckyGlobalTestEnableMess = false + generatePlot("WITHOUT MESS: " + baseTitle, "reorgs-noMESS.png") } diff --git a/go.mod b/go.mod index 92ddfbb6e5..4d0a1a5279 100755 --- a/go.mod +++ b/go.mod @@ -69,6 +69,7 @@ require ( golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8 golang.org/x/text v0.3.3 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 + gonum.org/v1/plot v0.8.0 gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 gopkg.in/urfave/cli.v1 v1.20.0 diff --git a/go.sum b/go.sum index 4de62984bd..dd271248eb 100755 --- a/go.sum +++ b/go.sum @@ -11,6 +11,7 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20200628203458-851255f7a67b/go.mod h1:jiUwifN9cRl/zmco43aAqh0aV+s9GbhG13KcD+gEpkU= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= @@ -41,6 +42,8 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQuuDNdCbyAgw= github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af h1:wVe6/Ea46ZMeNkQjjBW6xcqyQA/j5e0D6GytH95g0gQ= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= @@ -57,6 +60,7 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bgentry/speakeasy v0.1.0/go.mod 
h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= @@ -105,6 +109,9 @@ github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= @@ -112,9 +119,13 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-fonts/dejavu v0.1.0 h1:JSajPXURYqpr+Cu8U9bt8K+XcACIHWqWrvWCKyeFmVQ= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-latex/latex v0.0.0-20200518072620-0806b477ea35 h1:uroDDLmuCK5Pz5J/Ef5vCL6F0sJmAtZFTm0/cF027F4= +github.com/go-latex/latex v0.0.0-20200518072620-0806b477ea35/go.mod h1:PNI+CcWytn/2Z/9f1SGOOYn0eILruVyp0v2/iAs8asQ= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -129,6 +140,8 @@ github.com/go-test/deep v1.0.5/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -218,6 +231,10 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.16.2 h1:jgbatWHfRlPYiK85qgevsZTHviWXKwB1TTiKdz5PtRc= +github.com/jung-kurt/gofpdf v1.16.2/go.mod h1:1hl7y57EsiPAkLbOwzpzqgx1A30nQCk/YmFV8S2vmK0= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 h1:I/yrLt2WilKxlQKCM52clh5rGzTKpVctGT1lH4Dc8Jw= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -291,6 +308,7 @@ github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/phpdave11/gofpdi v1.0.7/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -316,6 +334,7 @@ github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9Ac github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 h1:3hxavr+IHMsQBrYUPQM5v0CgENFktkkbg1sfpgM3h20= github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shirou/gopsutil v2.20.5+incompatible h1:tYH07UPoQt0OCQdgWWMgYHy3/a9bcxNpBIysykNIP7I= @@ -386,13 +405,23 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519 h1:1e2ufUJNM3lCHEY5jIgac/7UTjd6cgJNdatjPdFWf34= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -452,6 +481,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -466,8 +496,10 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -480,12 +512,21 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.1 h1:wGtP3yGpc5mCLOLeTeBdjeui9oZSz5De0eOjMLC/QuQ= +gonum.org/v1/gonum v0.8.1/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.8.0 h1:dNgubmltsMoehfn6XgbutHpicbUfbkcGSxkICy1bC4o= +gonum.org/v1/plot v0.8.0/go.mod h1:3GH8dTfoceRTELDnv+4HNwbvM/eMfdDUGHFG2bo3NeE= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -545,3 +586,5 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= From 94e7fed35a563253fd51e918e13aaf9c94d4b8bd Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 11 Sep 2020 10:30:55 -0500 Subject: [PATCH 063/105] forkid: update forkid tests with classic,mordor ecbp11355 activations Signed-off-by: meows --- core/forkid/forkid_test.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git 
a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index 8adab9ae60..d275c0c4f6 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -161,8 +161,10 @@ func TestCreation(t *testing.T) { {9573000, ID{Hash: checksumToBytes(0x7ba22882), Next: 10500839}}, {9573001, ID{Hash: checksumToBytes(0x7ba22882), Next: 10500839}}, {10500838, ID{Hash: checksumToBytes(0x7ba22882), Next: 10500839}}, - {10500839, ID{Hash: checksumToBytes(0x9007bfcc), Next: 0}}, - {10500840, ID{Hash: checksumToBytes(0x9007bfcc), Next: 0}}, + {10500839, ID{Hash: checksumToBytes(0x9007bfcc), Next: 11242400}}, + {10500840, ID{Hash: checksumToBytes(0x9007bfcc), Next: 11242400}}, + {11242399, ID{Hash: checksumToBytes(0x9007bfcc), Next: 11242400}}, + {11242400, ID{Hash: checksumToBytes(0x8999f519), Next: 0}}, }, }, { @@ -197,8 +199,9 @@ func TestCreation(t *testing.T) { {301243, ID{Hash: checksumToBytes(0x604f6ee1), Next: 999983}}, {301244, ID{Hash: checksumToBytes(0x604f6ee1), Next: 999983}}, {999982, ID{Hash: checksumToBytes(0x604f6ee1), Next: 999983}}, - {999983, ID{Hash: checksumToBytes(0xf42f5539), Next: 0}}, - {999984, ID{Hash: checksumToBytes(0xf42f5539), Next: 0}}, + {999983, ID{Hash: checksumToBytes(0xf42f5539), Next: 2290740}}, + {2290739, ID{Hash: checksumToBytes(0xf42f5539), Next: 2290740}}, + {2290740, ID{Hash: checksumToBytes(0xafae52d3), Next: 0}}, }, }, } @@ -321,7 +324,7 @@ func TestGatherForks(t *testing.T) { { "classic", params.ClassicChainConfig, - []uint64{1150000, 2500000, 3000000, 5000000, 5900000, 8772000, 9573000, 10500839}, + []uint64{1150000, 2500000, 3000000, 5000000, 5900000, 8772000, 9573000, 10500839, 11242400}, }, { "mainnet", @@ -331,7 +334,7 @@ func TestGatherForks(t *testing.T) { { "mordor", params.MordorChainConfig, - []uint64{301_243, 999_983}, + []uint64{301_243, 999_983, 2290740}, }, { "kotti", From d8c7680ddcda5fb971f4546c6eccb2cddf5f79d6 Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 11 Sep 2020 11:51:56 -0500 Subject: [PATCH 064/105] .golangci.yml: include AF unused functions in allowed deadcode Signed-off-by: meows --- .golangci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.golangci.yml b/.golangci.yml index 3260c0f987..aea7a35b66 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -50,3 +50,6 @@ issues: - path: cmd/faucet/ linters: - deadcode + - path: core/blockchain_af.go + linters: + - deadcode From f66da3fc5fc7db03a29e95b0622a6b5e624d7276 Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 11 Sep 2020 11:52:20 -0500 Subject: [PATCH 065/105] core: lint, add sin fn test Signed-off-by: meows --- core/blockchain_af.go | 47 +++++++++++++++++--------------------- core/blockchain_af_test.go | 19 +++++++++++++-- 2 files changed, 38 insertions(+), 28 deletions(-) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index 71c8b4ca09..2fbf4eb39f 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -7,7 +7,6 @@ import ( "math/big" "math/rand" "sync/atomic" - "time" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" @@ -16,10 +15,6 @@ import ( // errReorgFinality represents an error caused by artificial finality mechanisms. var errReorgFinality = errors.New("finality-enforced invalid new chain") -func init() { - rand.Seed(time.Now().UnixNano()) -} - // EnableArtificialFinality enables and disable artificial finality features for the blockchain. 
// Currently toggled features include: // - ECBP11355-MESS: modified exponential subject scoring @@ -73,7 +68,7 @@ func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) new(big.Float).SetInt(new(big.Int).Sub(localTD, commonAncestorTD)), ).Float64() - x := float64(proposed.Time-commonAncestor.Time) + x := float64(proposed.Time - commonAncestor.Time) antiGravity := ecbp11355AGSinusoidalA(x) if tdRatio < antiGravity { @@ -84,6 +79,25 @@ func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) return nil } +/* +ecbp11355AGSinusoidalA is a sinusoidal function. + +OPTION 3: Yet slower takeoff, yet steeper eventual ascent. Has a differentiable ceiling transition. +h(x)=15 sin((x+12000 π)/(8000))+15+1 + +*/ +func ecbp11355AGSinusoidalA(x float64) (antiGravity float64) { + ampl := float64(15) // amplitude + pDiv := float64(8000) // period divisor + phaseShift := math.Pi * (pDiv * 1.5) + peakX := math.Pi * pDiv // x value of first sin peak where x > 0 + if x > peakX { + // Cause the x value to limit to the x value of the first peak of the sin wave (ceiling). + x = peakX + } + return (ampl * math.Sin((x+phaseShift)/pDiv)) + ampl + 1 +} + // ecbp11355AGSameSameShallowOK is an allowance arbitration function for chain segments // of equal total difficulty using probability weighted toward short segments. // Removing the probability and simply using a short cap may also work fine. @@ -104,31 +118,12 @@ func ecbp11355AGSameSameShallowOK(tdRatio float64, proposed, commonAncestor *typ return false } -/* -ecbp11355AGSinusoidalA is a sinusoidal function. - -OPTION 3: Yet slower takeoff, yet steeper eventual ascent. Has a differentiable ceiling. -h(x)=15 sin((x+12000 π)/(8000))+15+1 - -*/ -func ecbp11355AGSinusoidalA(x float64) (antiGravity float64) { - ampl := float64(15) // amplitude - pDiv := float64(8000) // period divisor - phaseShift := math.Pi * (pDiv*1.5) - peakX := math.Pi * pDiv // x value of first sin peak where x > 0 - if x > peakX { - // Cause the x value to limit to the x value of the first peak of the sin wave (ceiling). - x = peakX - } - return (ampl * math.Sin((x+phaseShift)/pDiv)) + ampl + 1 -} - /* ecbp11355AGExpB is an exponential function with x as a base (and rationalized exponent). 
OPTION 2: Slightly slower takeoff, steeper eventual ascent g(x)=x^(x*0.00002) - */ +*/ func ecbp11355AGExpB(x float64) (antiGravity float64) { return math.Pow(x, x*0.00002) } diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index 3ce34618f4..9e7535f624 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -236,7 +236,7 @@ func TestBlockChain_AF_ECBP11355(t *testing.T) { } func TestBlockChain_GenerateMESSPlot(t *testing.T) { - t.Skip("Test plots graph of chain acceptance for visualization.") + t.Skip("This test plots graph of chain acceptance for visualization.") easyLen := 200 maxHardLen := 100 @@ -310,5 +310,20 @@ func TestBlockChain_GenerateMESSPlot(t *testing.T) { baseTitle := fmt.Sprintf("Accept/Reject Reorgs: Relative Time (Difficulty) over Proposed Segment Length (%d-block original chain)", easyLen) generatePlot(baseTitle, "reorgs-MESS.png") yuckyGlobalTestEnableMess = false - generatePlot("WITHOUT MESS: " + baseTitle, "reorgs-noMESS.png") + generatePlot("WITHOUT MESS: "+baseTitle, "reorgs-noMESS.png") +} + +func TestEcbp11355AGSinusoidalA(t *testing.T) { + cases := []struct{ + in, out float64 + }{ + {0, 1}, + {25132, 31}, + } + tolerance := 0.0000001 + for i, c := range cases { + if got := ecbp11355AGSinusoidalA(c.in); got < c.out - tolerance || got > c.out + tolerance { + t.Fatalf("%d: in: %0.6f want: %0.6f got: %0.6f", i, c.in, c.out, got) + } + } } From 49e02ff8071a7833f29ca79722bcbd3b91556d81 Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 11 Sep 2020 11:53:48 -0500 Subject: [PATCH 066/105] core: re-include redundant reorg condition Rel https://github.com/etclabscore/core-geth-private/pull/7#discussion_r486968372 Signed-off-by: meows --- core/blockchain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/blockchain.go b/core/blockchain.go index 1ac201d4bd..250205f7b2 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1530,7 +1530,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf reorg := externTd.Cmp(localTd) > 0 currentBlock = bc.CurrentBlock() - if externTd.Cmp(localTd) == 0 { + if !reorg && externTd.Cmp(localTd) == 0 { // Split same-difficulty blocks by number, then preferentially select // the block generated by the local miner as the canonical block. 
if block.NumberU64() < currentBlock.NumberU64() { From 9ff67d4e022932782ab73e030aae09af84efb5b4 Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 11 Sep 2020 11:55:05 -0500 Subject: [PATCH 067/105] core: (lint): goimports -w Signed-off-by: meows --- core/blockchain_af_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index 9e7535f624..473b0a515f 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -314,7 +314,7 @@ func TestBlockChain_GenerateMESSPlot(t *testing.T) { } func TestEcbp11355AGSinusoidalA(t *testing.T) { - cases := []struct{ + cases := []struct { in, out float64 }{ {0, 1}, @@ -322,7 +322,7 @@ func TestEcbp11355AGSinusoidalA(t *testing.T) { } tolerance := 0.0000001 for i, c := range cases { - if got := ecbp11355AGSinusoidalA(c.in); got < c.out - tolerance || got > c.out + tolerance { + if got := ecbp11355AGSinusoidalA(c.in); got < c.out-tolerance || got > c.out+tolerance { t.Fatalf("%d: in: %0.6f want: %0.6f got: %0.6f", i, c.in, c.out, got) } } From ccef362786e61497fa02ba550dd84eb2f8b64b09 Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 11 Sep 2020 12:26:00 -0500 Subject: [PATCH 068/105] forkid,confp: forkid skips BestPractice (BP) transitions These are assumed to not be consensus-discriminating hard fork configs. Signed-off-by: meows --- core/forkid/forkid_test.go | 15 ++++++--------- params/confp/configurator.go | 10 ++++++++-- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index d275c0c4f6..8adab9ae60 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -161,10 +161,8 @@ func TestCreation(t *testing.T) { {9573000, ID{Hash: checksumToBytes(0x7ba22882), Next: 10500839}}, {9573001, ID{Hash: checksumToBytes(0x7ba22882), Next: 10500839}}, {10500838, ID{Hash: checksumToBytes(0x7ba22882), Next: 10500839}}, - {10500839, ID{Hash: checksumToBytes(0x9007bfcc), Next: 11242400}}, - {10500840, ID{Hash: checksumToBytes(0x9007bfcc), Next: 11242400}}, - {11242399, ID{Hash: checksumToBytes(0x9007bfcc), Next: 11242400}}, - {11242400, ID{Hash: checksumToBytes(0x8999f519), Next: 0}}, + {10500839, ID{Hash: checksumToBytes(0x9007bfcc), Next: 0}}, + {10500840, ID{Hash: checksumToBytes(0x9007bfcc), Next: 0}}, }, }, { @@ -199,9 +197,8 @@ func TestCreation(t *testing.T) { {301243, ID{Hash: checksumToBytes(0x604f6ee1), Next: 999983}}, {301244, ID{Hash: checksumToBytes(0x604f6ee1), Next: 999983}}, {999982, ID{Hash: checksumToBytes(0x604f6ee1), Next: 999983}}, - {999983, ID{Hash: checksumToBytes(0xf42f5539), Next: 2290740}}, - {2290739, ID{Hash: checksumToBytes(0xf42f5539), Next: 2290740}}, - {2290740, ID{Hash: checksumToBytes(0xafae52d3), Next: 0}}, + {999983, ID{Hash: checksumToBytes(0xf42f5539), Next: 0}}, + {999984, ID{Hash: checksumToBytes(0xf42f5539), Next: 0}}, }, }, } @@ -324,7 +321,7 @@ func TestGatherForks(t *testing.T) { { "classic", params.ClassicChainConfig, - []uint64{1150000, 2500000, 3000000, 5000000, 5900000, 8772000, 9573000, 10500839, 11242400}, + []uint64{1150000, 2500000, 3000000, 5000000, 5900000, 8772000, 9573000, 10500839}, }, { "mainnet", @@ -334,7 +331,7 @@ func TestGatherForks(t *testing.T) { { "mordor", params.MordorChainConfig, - []uint64{301_243, 999_983, 2290740}, + []uint64{301_243, 999_983}, }, { "kotti", diff --git a/params/confp/configurator.go b/params/confp/configurator.go index 6266caea3b..8ea724577c 100644 --- a/params/confp/configurator.go +++ 
b/params/confp/configurator.go @@ -252,8 +252,14 @@ func Forks(conf ctypes.ChainConfigurator) []uint64 { var forks []uint64 var forksM = make(map[uint64]struct{}) // Will key for uniqueness as fork numbers are appended to slice. - transitions, _ := Transitions(conf) - for _, tr := range transitions { + transitions, names := Transitions(conf) + for i, tr := range transitions { + name := names[i] + // Skip "Best Practice"-namespaced transition names, assuming + // these will not be enforced as hardforks. + if strings.Contains(name, "BP") { + continue + } // Extract the fork rule block number and aggregate it response := tr() if response == nil || From 643359868e7ae0af98475504488ed1efe8b7eb8d Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 11 Sep 2020 12:50:07 -0500 Subject: [PATCH 069/105] core: improve and add AF log context when enabled Signed-off-by: meows --- core/blockchain.go | 6 +++++- core/blockchain_af.go | 11 ++++++----- core/blockchain_insert.go | 5 +++++ 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 250205f7b2..b42803c4b0 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1662,7 +1662,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain) var ( - stats = insertStats{startTime: mclock.Now()} + stats = insertStats{ + startTime: mclock.Now(), + artificialFinality: bc.IsArtificialFinalityEnabled() && + bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, bc.CurrentBlock().Number()), + } lastCanon *types.Block ) // Fire a single chain head event if we've progressed the chain diff --git a/core/blockchain_af.go b/core/blockchain_af.go index 2fbf4eb39f..27165b914d 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -33,12 +33,13 @@ func (bc *BlockChain) EnableArtificialFinality(enable bool, logValues ...interfa statusLog = "Disabled" atomic.StoreInt32(&bc.artificialFinalityEnabled, 0) } - configActivated := bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, bc.CurrentHeader().Number) - logFn := log.Debug // Deactivated - if configActivated && enable { + if !bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, bc.CurrentHeader().Number) { + // Don't log anything if the config hasn't enabled it yet. + return + } + logFn := log.Warn // Deactivated and enabled + if enable { logFn = log.Info // Activated and enabled - } else if configActivated && !enable { - logFn = log.Warn // Activated and disabled } logFn(fmt.Sprintf("%s artificial finality features", statusLog), logValues...) } diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go index 5685b0a4bd..af36cd81cf 100644 --- a/core/blockchain_insert.go +++ b/core/blockchain_insert.go @@ -31,6 +31,7 @@ type insertStats struct { usedGas uint64 lastIndex int startTime mclock.AbsTime + artificialFinality bool } // statsReportLimit is the time limit during import and export after which we @@ -71,6 +72,10 @@ func (st *insertStats) report(chain []*types.Block, index int, dirty common.Stor if st.ignored > 0 { context = append(context, []interface{}{"ignored", st.ignored}...) } + if st.artificialFinality { + context = append(context, []interface{}{"af", st.artificialFinality}...) + } + log.Info("Imported new chain segment", context...) 
// Bump the stats reported to the next section From b0f01449b0986432488d337210f12a0a6d108bfd Mon Sep 17 00:00:00 2001 From: meows Date: Sat, 12 Sep 2020 10:19:36 -0500 Subject: [PATCH 070/105] params: no MESS activation set on ETC mainnet Signed-off-by: meows --- params/config_classic.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/config_classic.go b/params/config_classic.go index f6daf5a19c..0a3928b604 100644 --- a/params/config_classic.go +++ b/params/config_classic.go @@ -75,7 +75,7 @@ var ( ECIP1017EraRounds: big.NewInt(5000000), ECIP1010PauseBlock: big.NewInt(3000000), ECIP1010Length: big.NewInt(2000000), - ECBP11355FBlock: big.NewInt(11242400), // ETA 20 Sept 2020, ~1500 UTC + ECBP11355FBlock: nil, // big.NewInt(11295300), // Tentative: ETA 28 Sept 2020, ~1500 UTC RequireBlockHashes: map[uint64]common.Hash{ 1920000: common.HexToHash("0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f"), 2500000: common.HexToHash("0xca12c63534f565899681965528d536c52cb05b7c48e269c2a6cb77ad864d878a"), From e3c5da3d99b1f72062f298d344e4c2a8906a3302 Mon Sep 17 00:00:00 2001 From: meows Date: Sat, 12 Sep 2020 10:20:09 -0500 Subject: [PATCH 071/105] core: refactor and tinker with logic around equivalent short reorg acceptance Signed-off-by: meows --- core/blockchain_af.go | 34 +++++++++++++--------------------- 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index 27165b914d..9fd585daec 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -5,7 +5,6 @@ import ( "fmt" "math" "math/big" - "math/rand" "sync/atomic" "github.com/ethereum/go-ethereum/core/types" @@ -69,7 +68,20 @@ func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) new(big.Float).SetInt(new(big.Int).Sub(localTD, commonAncestorTD)), ).Float64() + // Time span diff. + // The minimum value is 1. x := float64(proposed.Time - commonAncestor.Time) + + // Commented now is a potential way to "soften" the acceptance while + // still avoiding discrete acceptance boundaries. In the case that ecpb11355 introduces + // unacceptable network inefficiency, this (or something similar) may be an option. + // // Accept with diminishing probability in the case of equivalent total difficulty. + // // Remember that the equivalent total difficulty case has ALREADY + // // passed one coin toss. + // if tdRatio == 1 && rand.Float64() < (1/x) { + // return nil + // } + antiGravity := ecbp11355AGSinusoidalA(x) if tdRatio < antiGravity { @@ -99,26 +111,6 @@ func ecbp11355AGSinusoidalA(x float64) (antiGravity float64) { return (ampl * math.Sin((x+phaseShift)/pDiv)) + ampl + 1 } -// ecbp11355AGSameSameShallowOK is an allowance arbitration function for chain segments -// of equal total difficulty using probability weighted toward short segments. -// Removing the probability and simply using a short cap may also work fine. -// If the unmoderated MESS algorithm turns out to generate an undesirable uncle rate, -// this may be a good solution. -func ecbp11355AGSameSameShallowOK(tdRatio float64, proposed, commonAncestor *types.Header) bool { - if tdRatio == 1 { - // If the segment is short and TD ratio is 1, make acceptance a probability, - // weighting toward short segments. 
- length := float64(proposed.Number.Uint64() - commonAncestor.Number.Uint64()) - if length <= 4 { - r := 1 / (length + 1) - if rand.Float64() < r { - return true - } - } - } - return false -} - /* ecbp11355AGExpB is an exponential function with x as a base (and rationalized exponent). From c8234bb76f805dc2e02e496c5f154563f6b25f32 Mon Sep 17 00:00:00 2001 From: meows Date: Sat, 12 Sep 2020 11:38:51 -0500 Subject: [PATCH 072/105] eth,web3ext: install admin_ecbp11355 method Signed-off-by: meows --- eth/api.go | 9 +++++++++ internal/web3ext/web3ext.go | 5 +++++ 2 files changed, 14 insertions(+) diff --git a/eth/api.go b/eth/api.go index 6103ed4a04..b81cfdd086 100644 --- a/eth/api.go +++ b/eth/api.go @@ -264,6 +264,15 @@ func (api *PrivateAdminAPI) ImportChain(file string) (bool, error) { return true, nil } +func (api *PrivateAdminAPI) Ecbp11355(blockNr rpc.BlockNumber) (bool, error) { + i := uint64(blockNr.Int64()) + err := api.eth.blockchain.Config().SetECBP11355Transition(&i) + return api.eth.blockchain.IsArtificialFinalityEnabled() && + api.eth.blockchain.Config().IsEnabled( + api.eth.blockchain.Config().GetECBP11355Transition, + api.eth.blockchain.CurrentBlock().Number()), err +} + // PublicDebugAPI is the collection of Ethereum full node APIs exposed // over the public debugging endpoint. type PublicDebugAPI struct { diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index c6057f66d2..3f2804a0c6 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -181,6 +181,11 @@ web3._extend({ call: 'admin_importChain', params: 1 }), + new web3._extend.Method({ + name: 'ecbp11355', + call: 'admin_ecbp11355', + params: 1 + }), new web3._extend.Method({ name: 'sleepBlocks', call: 'admin_sleepBlocks', From e0cf2a0a16bcdba4114d96822ea6f18d5c6d0ba8 Mon Sep 17 00:00:00 2001 From: meows Date: Sat, 12 Sep 2020 11:46:35 -0500 Subject: [PATCH 073/105] main,utils: install --ecbp11355 flag for chain config overrides This is just a double-extra safety feature that will allow operators to disable the feature if necessary. Signed-off-by: meows --- cmd/geth/main.go | 1 + cmd/geth/usage.go | 1 + cmd/utils/flags.go | 11 +++++++++++ 3 files changed, 13 insertions(+) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index ca7afafced..2bfbc37429 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -166,6 +166,7 @@ var ( utils.LegacyGpoPercentileFlag, utils.EWASMInterpreterFlag, utils.EVMInterpreterFlag, + utils.ECBP11355Flag, configFileFlag, } diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 4e87fcb515..316bc800a6 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -60,6 +60,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.IdentityFlag, utils.LightKDFFlag, utils.WhitelistFlag, + utils.ECBP11355Flag, }, }, { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 90a67a01f6..0162032287 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -749,6 +749,10 @@ var ( Usage: "External EVM configuration (default = built-in interpreter)", Value: "", } + ECBP11355Flag = cli.Uint64Flag{ + Name: "ecbp11355", + Usage: "Configure ECBP-11355 (MESS) block activation number", + } ) // MakeDataDir retrieves the currently requested data directory, terminating @@ -1664,6 +1668,13 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if gen := genesisForCtxChainConfig(ctx); gen != nil { cfg.Genesis = gen } + // Handle temporary chain configuration override cases. 
+ if ctx.GlobalIsSet(ECBP11355Flag.Name) { + n := ctx.GlobalUint64(ECBP11355Flag.Name) + if err := cfg.Genesis.Config.SetECBP11355Transition(&n); err != nil { + Fatalf("Failed to set ECBP-11355 activation number: %v", err) + } + } // Establish NetworkID. // If dev-mode is used, then NetworkID will be overridden. From 4fbe2d98bbbfde8c7590d6507af46ed63c3a6120 Mon Sep 17 00:00:00 2001 From: meows Date: Sat, 12 Sep 2020 12:39:31 -0500 Subject: [PATCH 074/105] main,utils,core,eth,web3ext,params,coregeth,ctypes,genesisT,goethereum,multigeth,parity: s/11355/1100/g Rel https://github.com/ethereumclassic/ECIPs/pull/373#pullrequestreview-487242717 Signed-off-by: meows --- cmd/geth/main.go | 2 +- cmd/geth/usage.go | 2 +- cmd/utils/flags.go | 14 +++++----- core/blockchain.go | 6 ++-- core/blockchain_af.go | 28 +++++++++---------- core/blockchain_af_test.go | 6 ++-- eth/api.go | 6 ++-- internal/web3ext/web3ext.go | 4 +-- params/config_classic.go | 4 +-- params/config_mordor.go | 2 +- params/types/coregeth/chain_config.go | 2 +- .../coregeth/chain_config_configurator.go | 8 +++--- params/types/ctypes/configurator_iface.go | 4 +-- params/types/genesisT/genesis.go | 8 +++--- .../goethereum/goethereum_configurator.go | 4 +-- .../multigethv0_chain_config_configurator.go | 4 +-- params/types/parity/parity_configurator.go | 4 +-- 17 files changed, 54 insertions(+), 54 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 2bfbc37429..020c9c0ee9 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -166,7 +166,7 @@ var ( utils.LegacyGpoPercentileFlag, utils.EWASMInterpreterFlag, utils.EVMInterpreterFlag, - utils.ECBP11355Flag, + utils.ECBP1100Flag, configFileFlag, } diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 316bc800a6..53dba45e36 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -60,7 +60,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.IdentityFlag, utils.LightKDFFlag, utils.WhitelistFlag, - utils.ECBP11355Flag, + utils.ECBP1100Flag, }, }, { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 0162032287..27e3fcc3da 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -749,9 +749,9 @@ var ( Usage: "External EVM configuration (default = built-in interpreter)", Value: "", } - ECBP11355Flag = cli.Uint64Flag{ - Name: "ecbp11355", - Usage: "Configure ECBP-11355 (MESS) block activation number", + ECBP1100Flag = cli.Uint64Flag{ + Name: "ecbp1100", + Usage: "Configure ECBP-1100 (MESS) block activation number", } ) @@ -1669,10 +1669,10 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { cfg.Genesis = gen } // Handle temporary chain configuration override cases. - if ctx.GlobalIsSet(ECBP11355Flag.Name) { - n := ctx.GlobalUint64(ECBP11355Flag.Name) - if err := cfg.Genesis.Config.SetECBP11355Transition(&n); err != nil { - Fatalf("Failed to set ECBP-11355 activation number: %v", err) + if ctx.GlobalIsSet(ECBP1100Flag.Name) { + n := ctx.GlobalUint64(ECBP1100Flag.Name) + if err := cfg.Genesis.Config.SetECBP1100Transition(&n); err != nil { + Fatalf("Failed to set ECBP-1100 activation number: %v", err) } } diff --git a/core/blockchain.go b/core/blockchain.go index b42803c4b0..1571840be4 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1551,8 +1551,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. if d.err == nil { // Reorg data error was nil. // Proceed with further reorg arbitration. 
- if bc.IsArtificialFinalityEnabled() && bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, currentBlock.Number()) { - d.err = bc.ecbp11355(d.commonBlock.Header(), currentBlock.Header(), block.Header()) + if bc.IsArtificialFinalityEnabled() && bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, currentBlock.Number()) { + d.err = bc.ecbp1100(d.commonBlock.Header(), currentBlock.Header(), block.Header()) } } // We leave the error to the reorg method to handle, if it wants to wrap it or log it or whatever. @@ -1665,7 +1665,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er stats = insertStats{ startTime: mclock.Now(), artificialFinality: bc.IsArtificialFinalityEnabled() && - bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, bc.CurrentBlock().Number()), + bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, bc.CurrentBlock().Number()), } lastCanon *types.Block ) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index 9fd585daec..e5812476b2 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -16,10 +16,10 @@ var errReorgFinality = errors.New("finality-enforced invalid new chain") // EnableArtificialFinality enables and disable artificial finality features for the blockchain. // Currently toggled features include: -// - ECBP11355-MESS: modified exponential subject scoring +// - ECBP1100-MESS: modified exponential subject scoring // // This level of activation works BELOW the chain configuration for any of the -// potential features. eg. If ECBP11355 is not activated at the chain config x block number, +// potential features. eg. If ECBP1100 is not activated at the chain config x block number, // then calling bc.EnableArtificialFinality(true) will be a noop. // The method is idempotent. func (bc *BlockChain) EnableArtificialFinality(enable bool, logValues ...interface{}) { @@ -32,7 +32,7 @@ func (bc *BlockChain) EnableArtificialFinality(enable bool, logValues ...interfa statusLog = "Disabled" atomic.StoreInt32(&bc.artificialFinalityEnabled, 0) } - if !bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP11355Transition, bc.CurrentHeader().Number) { + if !bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, bc.CurrentHeader().Number) { // Don't log anything if the config hasn't enabled it yet. return } @@ -50,10 +50,10 @@ func (bc *BlockChain) IsArtificialFinalityEnabled() bool { return atomic.LoadInt32(&bc.artificialFinalityEnabled) == 1 } -// ecpb11355 implements the "MESS" artificial finality mechanism +// ecpb1100 implements the "MESS" artificial finality mechanism // "Modified Exponential Subjective Scoring" used to prefer known chain segments // over later-to-come counterparts, especially proposed segments stretching far into the past. -func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) error { +func (bc *BlockChain) ecbp1100(commonAncestor, current, proposed *types.Header) error { // Get the total difficulty ratio of the proposed chain segment over the existing one. commonAncestorTD := bc.GetTd(commonAncestor.Hash(), commonAncestor.Number.Uint64()) @@ -73,7 +73,7 @@ func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) x := float64(proposed.Time - commonAncestor.Time) // Commented now is a potential way to "soften" the acceptance while - // still avoiding discrete acceptance boundaries. In the case that ecpb11355 introduces + // still avoiding discrete acceptance boundaries. 
In the case that ecpb1100 introduces // unacceptable network inefficiency, this (or something similar) may be an option. // // Accept with diminishing probability in the case of equivalent total difficulty. // // Remember that the equivalent total difficulty case has ALREADY @@ -82,24 +82,24 @@ func (bc *BlockChain) ecbp11355(commonAncestor, current, proposed *types.Header) // return nil // } - antiGravity := ecbp11355AGSinusoidalA(x) + antiGravity := ecbp1100AGSinusoidalA(x) if tdRatio < antiGravity { // Using "b/a" here as "'B' chain vs. 'A' chain", where A is original (current), and B is proposed (new). underpoweredBy := tdRatio / antiGravity - return fmt.Errorf("%w: ECPB11355-MESS: td.B/A%0.6f < antigravity%0.6f (under=%0.6f)", errReorgFinality, tdRatio, antiGravity, underpoweredBy) + return fmt.Errorf("%w: ECPB1100-MESS: td.B/A%0.6f < antigravity%0.6f (under=%0.6f)", errReorgFinality, tdRatio, antiGravity, underpoweredBy) } return nil } /* -ecbp11355AGSinusoidalA is a sinusoidal function. +ecbp1100AGSinusoidalA is a sinusoidal function. OPTION 3: Yet slower takeoff, yet steeper eventual ascent. Has a differentiable ceiling transition. h(x)=15 sin((x+12000 π)/(8000))+15+1 */ -func ecbp11355AGSinusoidalA(x float64) (antiGravity float64) { +func ecbp1100AGSinusoidalA(x float64) (antiGravity float64) { ampl := float64(15) // amplitude pDiv := float64(8000) // period divisor phaseShift := math.Pi * (pDiv * 1.5) @@ -112,17 +112,17 @@ func ecbp11355AGSinusoidalA(x float64) (antiGravity float64) { } /* -ecbp11355AGExpB is an exponential function with x as a base (and rationalized exponent). +ecbp1100AGExpB is an exponential function with x as a base (and rationalized exponent). OPTION 2: Slightly slower takeoff, steeper eventual ascent g(x)=x^(x*0.00002) */ -func ecbp11355AGExpB(x float64) (antiGravity float64) { +func ecbp1100AGExpB(x float64) (antiGravity float64) { return math.Pow(x, x*0.00002) } /* -ecbp11355AGExpA is an exponential function with x as exponent. +ecbp1100AGExpA is an exponential function with x as exponent. This was (one of?) Vitalik's "original" specs: > 1.0001 ** (number of seconds between when S1 was received and when S2 was received) @@ -133,6 +133,6 @@ This was (one of?) 
Vitalik's "original" specs: OPTION 1 (Original ESS) f(x)=1.0001^(x) */ -func ecbp11355AGExpA(x float64) (antiGravity float64) { +func ecbp1100AGExpA(x float64) (antiGravity float64) { return math.Pow(1.0001, x) } diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index 473b0a515f..436ec1a9eb 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -54,7 +54,7 @@ func runMESSTest(t *testing.T, easyL, hardL, caN int, easyT, hardT int64) (hardH return } -func TestBlockChain_AF_ECBP11355(t *testing.T) { +func TestBlockChain_AF_ECBP1100(t *testing.T) { yuckyGlobalTestEnableMess = true defer func() { yuckyGlobalTestEnableMess = false @@ -313,7 +313,7 @@ func TestBlockChain_GenerateMESSPlot(t *testing.T) { generatePlot("WITHOUT MESS: "+baseTitle, "reorgs-noMESS.png") } -func TestEcbp11355AGSinusoidalA(t *testing.T) { +func TestEcbp1100AGSinusoidalA(t *testing.T) { cases := []struct { in, out float64 }{ @@ -322,7 +322,7 @@ func TestEcbp11355AGSinusoidalA(t *testing.T) { } tolerance := 0.0000001 for i, c := range cases { - if got := ecbp11355AGSinusoidalA(c.in); got < c.out-tolerance || got > c.out+tolerance { + if got := ecbp1100AGSinusoidalA(c.in); got < c.out-tolerance || got > c.out+tolerance { t.Fatalf("%d: in: %0.6f want: %0.6f got: %0.6f", i, c.in, c.out, got) } } diff --git a/eth/api.go b/eth/api.go index b81cfdd086..e856b3e66d 100644 --- a/eth/api.go +++ b/eth/api.go @@ -264,12 +264,12 @@ func (api *PrivateAdminAPI) ImportChain(file string) (bool, error) { return true, nil } -func (api *PrivateAdminAPI) Ecbp11355(blockNr rpc.BlockNumber) (bool, error) { +func (api *PrivateAdminAPI) Ecbp1100(blockNr rpc.BlockNumber) (bool, error) { i := uint64(blockNr.Int64()) - err := api.eth.blockchain.Config().SetECBP11355Transition(&i) + err := api.eth.blockchain.Config().SetECBP1100Transition(&i) return api.eth.blockchain.IsArtificialFinalityEnabled() && api.eth.blockchain.Config().IsEnabled( - api.eth.blockchain.Config().GetECBP11355Transition, + api.eth.blockchain.Config().GetECBP1100Transition, api.eth.blockchain.CurrentBlock().Number()), err } diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index 3f2804a0c6..52cca43a4c 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -182,8 +182,8 @@ web3._extend({ params: 1 }), new web3._extend.Method({ - name: 'ecbp11355', - call: 'admin_ecbp11355', + name: 'ecbp1100', + call: 'admin_ecbp1100', params: 1 }), new web3._extend.Method({ diff --git a/params/config_classic.go b/params/config_classic.go index 0a3928b604..a0de5dd8f8 100644 --- a/params/config_classic.go +++ b/params/config_classic.go @@ -75,7 +75,7 @@ var ( ECIP1017EraRounds: big.NewInt(5000000), ECIP1010PauseBlock: big.NewInt(3000000), ECIP1010Length: big.NewInt(2000000), - ECBP11355FBlock: nil, // big.NewInt(11295300), // Tentative: ETA 28 Sept 2020, ~1500 UTC + ECBP1100FBlock: nil, // big.NewInt(11295300), // Tentative: ETA 28 Sept 2020, ~1500 UTC RequireBlockHashes: map[uint64]common.Hash{ 1920000: common.HexToHash("0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f"), 2500000: common.HexToHash("0xca12c63534f565899681965528d536c52cb05b7c48e269c2a6cb77ad864d878a"), @@ -134,7 +134,7 @@ var ( ECIP1017EraRounds: big.NewInt(5000), ECIP1010PauseBlock: big.NewInt(3), ECIP1010Length: big.NewInt(2), - ECBP11355FBlock: big.NewInt(11), + ECBP1100FBlock: big.NewInt(11), } ) diff --git a/params/config_mordor.go b/params/config_mordor.go index 34ecde70d6..74460e50c9 100644 --- a/params/config_mordor.go +++ 
b/params/config_mordor.go @@ -71,7 +71,7 @@ var ( ECIP1017EraRounds: big.NewInt(2000000), ECIP1010PauseBlock: nil, ECIP1010Length: nil, - ECBP11355FBlock: big.NewInt(2290740), // ETA 15 Sept 2020, ~1500 UTC + ECBP1100FBlock: big.NewInt(2290740), // ETA 15 Sept 2020, ~1500 UTC RequireBlockHashes: map[uint64]common.Hash{ 840013: common.HexToHash("0x2ceada2b191879b71a5bcf2241dd9bc50d6d953f1640e62f9c2cee941dc61c9d"), 840014: common.HexToHash("0x8ec29dd692c8985b82410817bac232fc82805b746538d17bc924624fe74a0fcf"), diff --git a/params/types/coregeth/chain_config.go b/params/types/coregeth/chain_config.go index 33da281e0e..4f934c7745 100644 --- a/params/types/coregeth/chain_config.go +++ b/params/types/coregeth/chain_config.go @@ -173,7 +173,7 @@ type CoreGethChainConfig struct { ECIP1017EraRounds *big.Int `json:"ecip1017EraRounds,omitempty"` // ECIP1017 era rounds ECIP1080FBlock *big.Int `json:"ecip1080FBlock,omitempty"` - ECBP11355FBlock *big.Int `json:"ecbp11355FBlock,omitempty"` // ECBP11355:MESS artificial finality + ECBP1100FBlock *big.Int `json:"ecbp1100FBlock,omitempty"` // ECBP1100:MESS artificial finality DisposalBlock *big.Int `json:"disposalBlock,omitempty"` // Bomb disposal HF block SocialBlock *big.Int `json:"socialBlock,omitempty"` // Ethereum Social Reward block diff --git a/params/types/coregeth/chain_config_configurator.go b/params/types/coregeth/chain_config_configurator.go index ee6da04ab1..af4ea10f55 100644 --- a/params/types/coregeth/chain_config_configurator.go +++ b/params/types/coregeth/chain_config_configurator.go @@ -381,12 +381,12 @@ func (c *CoreGethChainConfig) SetEIP2537Transition(n *uint64) error { return nil } -func (c *CoreGethChainConfig) GetECBP11355Transition() *uint64 { - return bigNewU64(c.ECBP11355FBlock) +func (c *CoreGethChainConfig) GetECBP1100Transition() *uint64 { + return bigNewU64(c.ECBP1100FBlock) } -func (c *CoreGethChainConfig) SetECBP11355Transition(n *uint64) error { - c.ECBP11355FBlock = setBig(c.ECBP11355FBlock, n) +func (c *CoreGethChainConfig) SetECBP1100Transition(n *uint64) error { + c.ECBP1100FBlock = setBig(c.ECBP1100FBlock, n) return nil } diff --git a/params/types/ctypes/configurator_iface.go b/params/types/ctypes/configurator_iface.go index a3941f762b..2abb0e56c5 100644 --- a/params/types/ctypes/configurator_iface.go +++ b/params/types/ctypes/configurator_iface.go @@ -130,8 +130,8 @@ type ProtocolSpecifier interface { SetEIP1706Transition(n *uint64) error GetEIP2537Transition() *uint64 SetEIP2537Transition(n *uint64) error - GetECBP11355Transition() *uint64 - SetECBP11355Transition(n *uint64) error + GetECBP1100Transition() *uint64 + SetECBP1100Transition(n *uint64) error } type Forker interface { diff --git a/params/types/genesisT/genesis.go b/params/types/genesisT/genesis.go index ee3b942ce8..9c97f454a2 100644 --- a/params/types/genesisT/genesis.go +++ b/params/types/genesisT/genesis.go @@ -544,12 +544,12 @@ func (g *Genesis) SetEIP2537Transition(n *uint64) error { return g.Config.SetEIP2537Transition(n) } -func (g *Genesis) GetECBP11355Transition() *uint64 { - return g.Config.GetECBP11355Transition() +func (g *Genesis) GetECBP1100Transition() *uint64 { + return g.Config.GetECBP1100Transition() } -func (g *Genesis) SetECBP11355Transition(n *uint64) error { - return g.Config.SetECBP11355Transition(n) +func (g *Genesis) SetECBP1100Transition(n *uint64) error { + return g.Config.SetECBP1100Transition(n) } func (g *Genesis) IsEnabled(fn func() *uint64, n *big.Int) bool { diff --git a/params/types/goethereum/goethereum_configurator.go 
b/params/types/goethereum/goethereum_configurator.go index 99fdc76b25..7a526b1ade 100644 --- a/params/types/goethereum/goethereum_configurator.go +++ b/params/types/goethereum/goethereum_configurator.go @@ -391,11 +391,11 @@ func (c *ChainConfig) SetEIP2537Transition(n *uint64) error { return nil } -func (c *ChainConfig) GetECBP11355Transition() *uint64 { +func (c *ChainConfig) GetECBP1100Transition() *uint64 { return nil } -func (c *ChainConfig) SetECBP11355Transition(n *uint64) error { +func (c *ChainConfig) SetECBP1100Transition(n *uint64) error { if n == nil { return nil } diff --git a/params/types/multigeth/multigethv0_chain_config_configurator.go b/params/types/multigeth/multigethv0_chain_config_configurator.go index ebdf349272..bf3ed07972 100644 --- a/params/types/multigeth/multigethv0_chain_config_configurator.go +++ b/params/types/multigeth/multigethv0_chain_config_configurator.go @@ -416,11 +416,11 @@ func (c *ChainConfig) SetEIP2537Transition(n *uint64) error { return ctypes.ErrUnsupportedConfigFatal } -func (c *ChainConfig) GetECBP11355Transition() *uint64 { +func (c *ChainConfig) GetECBP1100Transition() *uint64 { return nil } -func (c *ChainConfig) SetECBP11355Transition(n *uint64) error { +func (c *ChainConfig) SetECBP1100Transition(n *uint64) error { if n == nil { return nil } diff --git a/params/types/parity/parity_configurator.go b/params/types/parity/parity_configurator.go index 7f82f26150..673e1b0be7 100644 --- a/params/types/parity/parity_configurator.go +++ b/params/types/parity/parity_configurator.go @@ -624,11 +624,11 @@ func (spec *ParityChainSpec) SetEIP2537Transition(n *uint64) error { return nil } -func (spec *ParityChainSpec) GetECBP11355Transition() *uint64 { +func (spec *ParityChainSpec) GetECBP1100Transition() *uint64 { return nil } -func (spec *ParityChainSpec) SetECBP11355Transition(n *uint64) error { +func (spec *ParityChainSpec) SetECBP1100Transition(n *uint64) error { if n == nil { return nil } From 52a0da13a74371ed4fe44ad818c2db022947218d Mon Sep 17 00:00:00 2001 From: meows Date: Sat, 12 Sep 2020 12:40:05 -0500 Subject: [PATCH 075/105] params: (lint): goimports -w Signed-off-by: meows --- params/config_classic.go | 4 ++-- params/config_mordor.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/params/config_classic.go b/params/config_classic.go index a0de5dd8f8..25ea00625d 100644 --- a/params/config_classic.go +++ b/params/config_classic.go @@ -75,7 +75,7 @@ var ( ECIP1017EraRounds: big.NewInt(5000000), ECIP1010PauseBlock: big.NewInt(3000000), ECIP1010Length: big.NewInt(2000000), - ECBP1100FBlock: nil, // big.NewInt(11295300), // Tentative: ETA 28 Sept 2020, ~1500 UTC + ECBP1100FBlock: nil, // big.NewInt(11295300), // Tentative: ETA 28 Sept 2020, ~1500 UTC RequireBlockHashes: map[uint64]common.Hash{ 1920000: common.HexToHash("0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f"), 2500000: common.HexToHash("0xca12c63534f565899681965528d536c52cb05b7c48e269c2a6cb77ad864d878a"), @@ -134,7 +134,7 @@ var ( ECIP1017EraRounds: big.NewInt(5000), ECIP1010PauseBlock: big.NewInt(3), ECIP1010Length: big.NewInt(2), - ECBP1100FBlock: big.NewInt(11), + ECBP1100FBlock: big.NewInt(11), } ) diff --git a/params/config_mordor.go b/params/config_mordor.go index 74460e50c9..1198de55ac 100644 --- a/params/config_mordor.go +++ b/params/config_mordor.go @@ -71,7 +71,7 @@ var ( ECIP1017EraRounds: big.NewInt(2000000), ECIP1010PauseBlock: nil, ECIP1010Length: nil, - ECBP1100FBlock: big.NewInt(2290740), // ETA 15 Sept 2020, ~1500 UTC + 
ECBP1100FBlock: big.NewInt(2290740), // ETA 15 Sept 2020, ~1500 UTC RequireBlockHashes: map[uint64]common.Hash{ 840013: common.HexToHash("0x2ceada2b191879b71a5bcf2241dd9bc50d6d953f1640e62f9c2cee941dc61c9d"), 840014: common.HexToHash("0x8ec29dd692c8985b82410817bac232fc82805b746538d17bc924624fe74a0fcf"), From 0c467555680819c25e27e8bd6fa66908578801bc Mon Sep 17 00:00:00 2001 From: meows Date: Sat, 12 Sep 2020 13:06:58 -0500 Subject: [PATCH 076/105] confp: refactor method cross-compatibility namespacing logic This also introduces the no-compare for function Compatible, since we don't want to necessitate equivalence there either. Signed-off-by: meows --- params/confp/configurator.go | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/params/confp/configurator.go b/params/confp/configurator.go index 8ea724577c..942d154be0 100644 --- a/params/confp/configurator.go +++ b/params/confp/configurator.go @@ -21,12 +21,31 @@ import ( "math" "math/big" "reflect" + "regexp" "sort" "strings" "github.com/ethereum/go-ethereum/params/types/ctypes" ) +var ( + // compatibleProtocolNameSchemes define matchable naming schemes used by configuration methods + // that are not incompatible with configuration either having or lacking them. + compatibleProtocolNameSchemes = []string{ + "ECBP", // "Ethereum Classic Best Practice" + "EBP", // "Ethereum Best Practice" + } +) + +func nameSignalsCompatibility(name string) bool { + for _, s := range compatibleProtocolNameSchemes { + if regexp.MustCompile(s).MatchString(name) { + return true + } + } + return false +} + // ConfigCompatError is raised if the locally-stored blockchain is initialised with a // ChainConfig that would alter the past. type ConfigCompatError struct { @@ -133,6 +152,11 @@ func compatible(head *uint64, a, b ctypes.ChainConfigurator) *ConfigCompatError aFns, aNames := Transitions(a) bFns, _ := Transitions(b) for i, afn := range aFns { + // Skip cross-compatible namespaced transition names, assuming + // these will not be enforced as hardforks. + if nameSignalsCompatibility(aNames[i]) { + continue + } if err := func(c1, c2, head *uint64) *ConfigCompatError { if isForkIncompatible(c1, c2, head) { return NewCompatError("incompatible fork value: "+aNames[i], c1, c2) @@ -254,10 +278,9 @@ func Forks(conf ctypes.ChainConfigurator) []uint64 { transitions, names := Transitions(conf) for i, tr := range transitions { - name := names[i] - // Skip "Best Practice"-namespaced transition names, assuming + // Skip cross-compatible namespaced transition names, assuming // these will not be enforced as hardforks. 
- if strings.Contains(name, "BP") { + if nameSignalsCompatibility(names[i]) { continue } // Extract the fork rule block number and aggregate it From 5bc3fc2d1efdfbe6b2dd6add8015023de58dbe98 Mon Sep 17 00:00:00 2001 From: meows Date: Sun, 13 Sep 2020 13:58:07 -0500 Subject: [PATCH 077/105] core: fix typos, prettify ecbp1100 logging Signed-off-by: meows --- core/blockchain_af.go | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index e5812476b2..7531b48cbf 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -6,7 +6,9 @@ import ( "math" "math/big" "sync/atomic" + "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" ) @@ -50,11 +52,9 @@ func (bc *BlockChain) IsArtificialFinalityEnabled() bool { return atomic.LoadInt32(&bc.artificialFinalityEnabled) == 1 } -// ecpb1100 implements the "MESS" artificial finality mechanism -// "Modified Exponential Subjective Scoring" used to prefer known chain segments -// over later-to-come counterparts, especially proposed segments stretching far into the past. -func (bc *BlockChain) ecbp1100(commonAncestor, current, proposed *types.Header) error { - +// getTDRatio is a helper function returning the total difficulty ratio of +// proposed over current chain segments. +func (bc *BlockChain) getTDRatio(commonAncestor, current, proposed *types.Header) float64 { // Get the total difficulty ratio of the proposed chain segment over the existing one. commonAncestorTD := bc.GetTd(commonAncestor.Hash(), commonAncestor.Number.Uint64()) @@ -67,13 +67,22 @@ func (bc *BlockChain) ecbp1100(commonAncestor, current, proposed *types.Header) new(big.Float).SetInt(new(big.Int).Sub(proposedTD, commonAncestorTD)), new(big.Float).SetInt(new(big.Int).Sub(localTD, commonAncestorTD)), ).Float64() + return tdRatio +} + +// ecbp1100 implements the "MESS" artificial finality mechanism +// "Modified Exponential Subjective Scoring" used to prefer known chain segments +// over later-to-come counterparts, especially proposed segments stretching far into the past. +func (bc *BlockChain) ecbp1100(commonAncestor, current, proposed *types.Header) error { + + tdRatio := bc.getTDRatio(commonAncestor, current, proposed) // Time span diff. // The minimum value is 1. x := float64(proposed.Time - commonAncestor.Time) // Commented now is a potential way to "soften" the acceptance while - // still avoiding discrete acceptance boundaries. In the case that ecpb1100 introduces + // still avoiding discrete acceptance boundaries. In the case that ecbp1100 introduces // unacceptable network inefficiency, this (or something similar) may be an option. // // Accept with diminishing probability in the case of equivalent total difficulty. // // Remember that the equivalent total difficulty case has ALREADY @@ -86,9 +95,19 @@ func (bc *BlockChain) ecbp1100(commonAncestor, current, proposed *types.Header) if tdRatio < antiGravity { // Using "b/a" here as "'B' chain vs. 'A' chain", where A is original (current), and B is proposed (new). 
- underpoweredBy := tdRatio / antiGravity - return fmt.Errorf("%w: ECPB1100-MESS: td.B/A%0.6f < antigravity%0.6f (under=%0.6f)", errReorgFinality, tdRatio, antiGravity, underpoweredBy) + return fmt.Errorf(`%w: ECBP1100-MESS 🔒 status=rejected age=%v blocks=%d td.B/A=%0.6f < antigravity=%0.6f`, + errReorgFinality, + common.PrettyAge(time.Unix(int64(commonAncestor.Time), 0)), proposed.Number.Uint64()-commonAncestor.Number.Uint64(), + tdRatio, antiGravity, + ) } + log.Info("ECBP1100-MESS 🔓", + "status", "accepted", + "age", common.PrettyAge(time.Unix(int64(commonAncestor.Time), 0)), + "blocks", proposed.Number.Uint64()-commonAncestor.Number.Uint64(), + "td.B/A", tdRatio, + "antigravity", antiGravity, + ) return nil } From 6c41aa06c2409f451c523f1743cc1ce8582b46a7 Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 17 Sep 2020 11:29:17 -0500 Subject: [PATCH 078/105] core: swap sin for polynomial (cubic) fn Allows the calculation to avoid ugly use of floats while keeping the curve very similar. Signed-off-by: meows --- core/blockchain_af.go | 70 ++++++++++++++++++++++++++------------ core/blockchain_af_test.go | 8 ++--- 2 files changed, 53 insertions(+), 25 deletions(-) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index 7531b48cbf..649855eef0 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -9,6 +9,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + emath "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" ) @@ -75,42 +76,69 @@ func (bc *BlockChain) getTDRatio(commonAncestor, current, proposed *types.Header // over later-to-come counterparts, especially proposed segments stretching far into the past. func (bc *BlockChain) ecbp1100(commonAncestor, current, proposed *types.Header) error { - tdRatio := bc.getTDRatio(commonAncestor, current, proposed) - - // Time span diff. - // The minimum value is 1. - x := float64(proposed.Time - commonAncestor.Time) + // Get the total difficulties of the proposed chain segment and the existing one. + commonAncestorTD := bc.GetTd(commonAncestor.Hash(), commonAncestor.Number.Uint64()) + proposedParentTD := bc.GetTd(proposed.ParentHash, proposed.Number.Uint64()-1) + proposedTD := new(big.Int).Add(proposed.Difficulty, proposedParentTD) + localTD := bc.GetTd(current.Hash(), current.Number.Uint64()) - // Commented now is a potential way to "soften" the acceptance while - // still avoiding discrete acceptance boundaries. In the case that ecbp1100 introduces - // unacceptable network inefficiency, this (or something similar) may be an option. - // // Accept with diminishing probability in the case of equivalent total difficulty. - // // Remember that the equivalent total difficulty case has ALREADY - // // passed one coin toss. - // if tdRatio == 1 && rand.Float64() < (1/x) { - // return nil - // } + // if proposed_subchain_td * CURVE_FUNCTION_DENOMINATOR < get_curve_function_numerator(proposed.Time - commonAncestor.Time) * local_subchain_td. + proposedSubchainTD := new(big.Int).Sub(proposedTD, commonAncestorTD) + localSubchainTD := new(big.Int).Sub(localTD, commonAncestorTD) - antiGravity := ecbp1100AGSinusoidalA(x) + got := proposedSubchainTD.Int64() * ecbp1100PolynomialVCurveFunctionDenominator + want := ecbp1100PolynomialV(int64(proposed.Time - commonAncestor.Time)) * localSubchainTD.Int64() - if tdRatio < antiGravity { - // Using "b/a" here as "'B' chain vs. 'A' chain", where A is original (current), and B is proposed (new). 
- return fmt.Errorf(`%w: ECBP1100-MESS 🔒 status=rejected age=%v blocks=%d td.B/A=%0.6f < antigravity=%0.6f`, + if got < want { + return fmt.Errorf(`%w: ECBP1100-MESS 🔒 status=rejected age=%v blocks=%d rat=%0.6f`, errReorgFinality, common.PrettyAge(time.Unix(int64(commonAncestor.Time), 0)), proposed.Number.Uint64()-commonAncestor.Number.Uint64(), - tdRatio, antiGravity, + float64(got) / float64(want), ) } log.Info("ECBP1100-MESS 🔓", "status", "accepted", "age", common.PrettyAge(time.Unix(int64(commonAncestor.Time), 0)), "blocks", proposed.Number.Uint64()-commonAncestor.Number.Uint64(), - "td.B/A", tdRatio, - "antigravity", antiGravity, + "rat", float64(got) / float64(want), ) return nil } +/* +ecbp1100PolynomialV is a cubic function that looks a lot like Option 3's sin function, +but adds the benefit that the calculation can be done with integers (instead of yucky floating points). +> https://github.com/ethereumclassic/ECIPs/issues/374#issuecomment-694156719 + +CURVE_FUNCTION_DENOMINATOR = 128 + +def get_curve_function_numerator(time_delta: int) -> int: + xcap = 25132 # = floor(8000*pi) + ampl = 15 + height = CURVE_FUNCTION_DENOMINATOR * (ampl * 2) + if x > xcap: + x = xcap + # The sine approximator `y = 3*x**2 - 2*x**3` rescaled to the desired height and width + return CURVE_FUNCTION_DENOMINATOR + (3 * x**2 - 2 * x**3 // xcap) * height // xcap ** 2 + + +The if tdRatio < antiGravity check would then be + +if proposed_subchain_td * CURVE_FUNCTION_DENOMINATOR < get_curve_function_numerator(proposed.Time - commonAncestor.Time) * local_subchain_td. +*/ +func ecbp1100PolynomialV(x int64) int64 { + if x > ecbp1100PolynomialVXCap { + x = ecbp1100PolynomialVXCap + } + return ecbp1100PolynomialVCurveFunctionDenominator + + ((3 * emath.BigPow(int64(x), 2).Int64()) - (2 * emath.BigPow(int64(x), 3).Int64() / ecbp1100PolynomialVXCap)) * + ecbp1100PolynomialVHeight / (emath.BigPow(ecbp1100PolynomialVXCap, 2).Int64()) +} +var ecbp1100PolynomialVCurveFunctionDenominator = int64(128) +var ecbp1100PolynomialVXCap = int64(25132) +var ecbp1100PolynomialVAmpl = int64(15) +var ecbp1100PolynomialVHeight = ecbp1100PolynomialVCurveFunctionDenominator * ecbp1100PolynomialVAmpl * 2 + /* ecbp1100AGSinusoidalA is a sinusoidal function. diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index 436ec1a9eb..a20d8c6dbf 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -157,20 +157,20 @@ func TestBlockChain_AF_ECBP1100(t *testing.T) { }, // Hard has insufficient total difficulty / length and is rejected. { - 500, 200, 300, + 500, 250, 250, 0, -9, false, false, }, // Hard has insufficient total difficulty / length and is rejected. { - 500, 200, 300, + 500, 250, 250, 7, -9, false, false, }, // Hard has insufficient total difficulty / length and is rejected. { - 500, 200, 300, - 17, -9, + 500, 300, 200, + 13, -9, false, false, }, // Hard has sufficient total difficulty / length and is accepted. From e4658422bfa40967256a32131dd5d3f9d03deeba Mon Sep 17 00:00:00 2001 From: meows Date: Sun, 20 Sep 2020 14:31:51 -0500 Subject: [PATCH 079/105] core: add ECBP1100 arbitration logic to writeKnownBlock This fixes an implementation bug where known block writes would sidestep the ECBP arbitration. 
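A condensed sketch of the gate this patch applies on the writeKnownBlock path (the helper name shouldApplyMESS is hypothetical; the diff below inlines these conditions directly):

    // Sketch only: when ECBP1100/MESS arbitration should run for an incoming block.
    // A block the local miner produced itself is never vetoed.
    func (bc *BlockChain) shouldApplyMESS(current, block *types.Block) bool {
        minerOwn := bc.shouldPreserve != nil && bc.shouldPreserve(block)
        return !minerOwn &&
            bc.IsArtificialFinalityEnabled() &&
            bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, current.Number())
    }

When that predicate holds and the known block is not a child of the current head, bc.ecbp1100(commonAncestor, current, proposed) decides whether the reorg may proceed.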
Signed-off-by: meows --- core/blockchain.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/core/blockchain.go b/core/blockchain.go index 1571840be4..d966e2f47a 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1422,6 +1422,19 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error { current := bc.CurrentBlock() if block.ParentHash() != current.Hash() { d := bc.getReorgData(current, block) + if d.err == nil { + // Reorg data error was nil. + // Proceed with further reorg arbitration. + + // If the node is mining and trying to insert their own block, we want to allow that. + minerOwn := bc.shouldPreserve != nil && bc.shouldPreserve(block) + if (bc.shouldPreserve == nil || !minerOwn) && + bc.IsArtificialFinalityEnabled() && + bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, current.Number()) { + + d.err = bc.ecbp1100(d.commonBlock.Header(), current.Header(), block.Header()) + } + } if err := bc.reorg(d); err != nil { return err } From f1c300e54f7793b16cdef85fb34da66f76e0cf17 Mon Sep 17 00:00:00 2001 From: meows Date: Mon, 21 Sep 2020 10:57:51 -0500 Subject: [PATCH 080/105] core: adds test TestAFKnownBlock Shows how and why implementing without writeKnownBlock is incorrect. Signed-off-by: meows --- core/blockchain_af_test.go | 43 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index a20d8c6dbf..4365e92b08 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -327,3 +327,46 @@ func TestEcbp1100AGSinusoidalA(t *testing.T) { } } } + +/* +TestAFKnownBlock tests that AF functionality works for chain re-insertions. + +Chain re-insertions use BlockChain.writeKnownBlock, where first-pass insertions +will hit writeBlockWithState. + +AF needs to be implemented at both sites to prevent re-proposed chains from sidestepping +the AF criteria. + */ +func TestAFKnownBlock(t *testing.T) { + engine := ethash.NewFaker() + + db := rawdb.NewMemoryDatabase() + genesis := params.DefaultMessNetGenesisBlock() + // genesis.Timestamp = 1 + genesisB := MustCommitGenesis(db, genesis) + + chain, err := NewBlockChain(db, nil, genesis.Config, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatal(err) + } + defer chain.Stop() + chain.EnableArtificialFinality(true) + + easy, _ := GenerateChain(genesis.Config, genesisB, engine, db, 1000, func(i int, gen *BlockGen) { + gen.OffsetTime(0) + }) + easyN, err := chain.InsertChain(easy) + if err != nil { + t.Fatal(err) + } + hard, _ := GenerateChain(genesis.Config, easy[easyN-100], engine, db, 100, func(i int, gen *BlockGen) { + gen.OffsetTime(-7) + }) + if _, err := chain.InsertChain(hard); err == nil { + t.Error("hard 1 inserted") + } + if _, err := chain.InsertChain(hard); err == nil { + t.Error("hard 2 inserted") + } +} + From 493ec1de460b182527edac634ef0e689c5210577 Mon Sep 17 00:00:00 2001 From: meows Date: Wed, 23 Sep 2020 07:11:04 -0500 Subject: [PATCH 081/105] core: use polynomial big int, refactor AF cond installation locales This commit created by checkouting core/ package at development commit 0b162cdf49. 
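One reason the curve math moves to big.Int here: the antigravity comparison multiplies sub-chain total difficulty by the curve numerator, and that product is not guaranteed to fit in an int64. A standalone sketch of the concern (the difficulty figure is only an assumed order of magnitude, not a measured mainnet value):

    package main

    import (
        "fmt"
        "math"
        "math/big"
    )

    func main() {
        // Assume per-block difficulty on the order of 2e14 (illustrative only).
        perBlock := big.NewInt(200_000_000_000_000)
        subchainTD := new(big.Int).Mul(perBlock, big.NewInt(500)) // ~1e17 across 500 blocks
        numeratorAtCap := big.NewInt(3968)                        // 128 + 128*15*2, the curve maximum
        product := new(big.Int).Mul(subchainTD, numeratorAtCap)   // ~4e20

        fmt.Println("product:", product)
        fmt.Println("fits in int64:", product.IsInt64(), "(max int64:", int64(math.MaxInt64), ")")
    }

Doing the comparison entirely in big.Int, as this change does, sidesteps that overflow without altering the shape of the curve.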
Signed-off-by: meows --- core/blockchain.go | 85 ++++-- core/blockchain_af.go | 115 ++++++-- core/blockchain_af_test.go | 542 ++++++++++++++++++++++++++++++++++++- 3 files changed, 694 insertions(+), 48 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index d966e2f47a..a644d90e1a 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1413,28 +1413,16 @@ func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (e return nil } -// writeKnownBlock updates the head block flag with a known block +// writeKnownBlockAsHead updates the head block flag with a known block // and introduces chain reorg if necessary. -func (bc *BlockChain) writeKnownBlock(block *types.Block) error { +// In ethereum/go-ethereum this is called writeKnownBlock. Same logic, better name. +func (bc *BlockChain) writeKnownBlockAsHead(block *types.Block) error { bc.wg.Add(1) defer bc.wg.Done() current := bc.CurrentBlock() if block.ParentHash() != current.Hash() { d := bc.getReorgData(current, block) - if d.err == nil { - // Reorg data error was nil. - // Proceed with further reorg arbitration. - - // If the node is mining and trying to insert their own block, we want to allow that. - minerOwn := bc.shouldPreserve != nil && bc.shouldPreserve(block) - if (bc.shouldPreserve == nil || !minerOwn) && - bc.IsArtificialFinalityEnabled() && - bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, current.Number()) { - - d.err = bc.ecbp1100(d.commonBlock.Header(), current.Header(), block.Header()) - } - } if err := bc.reorg(d); err != nil { return err } @@ -1558,6 +1546,9 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. } if reorg { + // If code reaches AF check, and it does not error, canonical status will be allowed (not disallowed). + canonicalDisallowed := false + if block.ParentHash() != currentBlock.Hash() { // Reorganise the chain if the parent is not the head block d := bc.getReorgData(currentBlock, block) @@ -1565,16 +1556,25 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. // Reorg data error was nil. // Proceed with further reorg arbitration. if bc.IsArtificialFinalityEnabled() && bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, currentBlock.Number()) { - d.err = bc.ecbp1100(d.commonBlock.Header(), currentBlock.Header(), block.Header()) + if err := bc.ecbp1100(d.commonBlock.Header(), currentBlock.Header(), block.Header()); err != nil { + log.Warn("Reorg disallowed", "error", err) + canonicalDisallowed = true + } } } - // We leave the error to the reorg method to handle, if it wants to wrap it or log it or whatever. - if err := bc.reorg(d); err != nil { - return NonStatTy, err + // If there is an error, we leave it to the reorg method to handle, if it wants to wrap it or log it or whatever. + if !canonicalDisallowed { + if err := bc.reorg(d); err != nil { + return NonStatTy, err + } } } // Status is canon; reorg succeeded. 
- status = CanonStatTy + if !canonicalDisallowed { + status = CanonStatTy + } else { + status = SideStatTy + } } else { status = SideStatTy } @@ -1717,15 +1717,53 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil ) for block != nil && err == ErrKnownBlock { + finalityDisallowed := false externTd = new(big.Int).Add(externTd, block.Difficulty()) if localTd.Cmp(externTd) < 0 { - break + // Have found a known block with GREATER THAN local total difficulty. + // Do not ignore this block, and as such, do not continue inserter iteration. + + // Check if known block write will cause a reorg. + if block.ParentHash() != current.Hash() { + reorgData := bc.getReorgData(current, block) + if reorgData.err == nil { + // If the reorgData is NOT nil, we know that the writeKnownBlockAsHead -> reorg + // logic will return the error. + // We let that part of the flow handle that error. + // We're only concerned with the non-error case, where the reorg + // will be permitted. + + // It will. That means we are on a different chain currently. + // Check if artificial finality forbids the reorganization, + // effectively overriding the simple (original) TD comparison check. + minerOwn := bc.shouldPreserve != nil && bc.shouldPreserve(block) + if (bc.shouldPreserve == nil || !minerOwn) && + bc.IsArtificialFinalityEnabled() && + bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, current.Number()) { + + if err := bc.ecbp1100(reorgData.commonBlock.Header(), current.Header(), block.Header()); err != nil { + log.Warn("Reorg disallowed", "error", err) + finalityDisallowed = true + } + } + } + } + if !finalityDisallowed { + break + } + // finalityDisallowed == true + // Total difficulty was greater, but that condition has been overridden by the artificial + // finality check. Continue like nothing happened. } + + // Local vs. External total difficulty was less than or equal. + // This block is deep in our chain and is not a head contender. log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash()) stats.ignored++ block, err = it.next() } + // The remaining blocks are still known blocks, the only scenario here is: // During the fast sync, the pivot point is already submitted but rollback // happens. Then node resets the head full block to a lower height via `rollback` @@ -1735,8 +1773,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er // `insertChain` while a part of them have higher total difficulty than current // head full block(new pivot point). 
for block != nil && err == ErrKnownBlock { + log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash()) - if err := bc.writeKnownBlock(block); err != nil { + if err := bc.writeKnownBlockAsHead(block); err != nil { return it.index, err } lastCanon = block @@ -1813,7 +1852,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er log.Error("Please file an issue, skip known block execution without receipt", "hash", block.Hash(), "number", block.NumberU64()) } - if err := bc.writeKnownBlock(block); err != nil { + if err := bc.writeKnownBlockAsHead(block); err != nil { return it.index, err } stats.processed++ diff --git a/core/blockchain_af.go b/core/blockchain_af.go index 649855eef0..d34cee22f3 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -86,21 +86,37 @@ func (bc *BlockChain) ecbp1100(commonAncestor, current, proposed *types.Header) proposedSubchainTD := new(big.Int).Sub(proposedTD, commonAncestorTD) localSubchainTD := new(big.Int).Sub(localTD, commonAncestorTD) - got := proposedSubchainTD.Int64() * ecbp1100PolynomialVCurveFunctionDenominator - want := ecbp1100PolynomialV(int64(proposed.Time - commonAncestor.Time)) * localSubchainTD.Int64() + xBig := big.NewInt(int64(proposed.Time - commonAncestor.Time)) + eq := ecbp1100PolynomialV(xBig) + want := eq.Mul(eq, localSubchainTD) - if got < want { - return fmt.Errorf(`%w: ECBP1100-MESS 🔒 status=rejected age=%v blocks=%d rat=%0.6f`, + got := new(big.Int).Mul(proposedSubchainTD, ecbp1100PolynomialVCurveFunctionDenominator) + + prettyRatio, _ := new(big.Float).Quo( + new(big.Float).SetInt(got), + new(big.Float).SetInt(want), + ).Float64() + + if got.Cmp(want) < 0 { + return fmt.Errorf(`%w: ECBP1100-MESS 🔒 status=rejected age=%v current.span=%v proposed.span=%v common.bno=%d current.bno=%d proposed.bno=%d tdr/gravity=%0.6f`, errReorgFinality, - common.PrettyAge(time.Unix(int64(commonAncestor.Time), 0)), proposed.Number.Uint64()-commonAncestor.Number.Uint64(), - float64(got) / float64(want), + common.PrettyAge(time.Unix(int64(commonAncestor.Time), 0)), + common.PrettyDuration(time.Duration(current.Time - commonAncestor.Time)*time.Second), + common.PrettyDuration(time.Duration(int32(xBig.Uint64()))*time.Second), + commonAncestor.Number.Uint64(), + current.Number.Uint64(), proposed.Number.Uint64(), + prettyRatio, ) } log.Info("ECBP1100-MESS 🔓", "status", "accepted", "age", common.PrettyAge(time.Unix(int64(commonAncestor.Time), 0)), - "blocks", proposed.Number.Uint64()-commonAncestor.Number.Uint64(), - "rat", float64(got) / float64(want), + "current.span", common.PrettyDuration(time.Duration(current.Time - commonAncestor.Time)*time.Second), + "proposed.span", common.PrettyDuration(time.Duration(int32(xBig.Uint64()))*time.Second), + "common.bno", commonAncestor.Number.Uint64(), + "current.bno", current.Number.Uint64(), + "proposed.bno", proposed.Number.Uint64(), + "tdr/gravity", prettyRatio, ) return nil } @@ -126,18 +142,78 @@ The if tdRatio < antiGravity check would then be if proposed_subchain_td * CURVE_FUNCTION_DENOMINATOR < get_curve_function_numerator(proposed.Time - commonAncestor.Time) * local_subchain_td. 
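As a rough orientation, using the constants above and ignoring small integer-division effects: at x = 0 the numerator is 128, i.e. a required TD ratio of 1.0; at x = xcap/2 = 12566 it is about 2048 (ratio roughly 16); at and beyond x = xcap = 25132 it saturates at 128 + 128*15*2 = 3968 (ratio 31). These approximate figures are derived from the constants above and are not part of the quoted proposal.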
*/ -func ecbp1100PolynomialV(x int64) int64 { - if x > ecbp1100PolynomialVXCap { - x = ecbp1100PolynomialVXCap - } - return ecbp1100PolynomialVCurveFunctionDenominator + - ((3 * emath.BigPow(int64(x), 2).Int64()) - (2 * emath.BigPow(int64(x), 3).Int64() / ecbp1100PolynomialVXCap)) * - ecbp1100PolynomialVHeight / (emath.BigPow(ecbp1100PolynomialVXCap, 2).Int64()) +func ecbp1100PolynomialV(x *big.Int) *big.Int { + + // Make a copy; do not mutate argument value. + + // if x > xcap: + // x = xcap + xA := big.NewInt(0) + xA.Set(emath.BigMin(x, ecbp1100PolynomialVXCap)) + + xB := big.NewInt(0) + xB.Set(emath.BigMin(x, ecbp1100PolynomialVXCap)) + + out := big.NewInt(0) + + // 3 * x**2 + xA.Exp(xA, big2, nil) + xA.Mul(xA, big3) + + // 3 * x**2 // xcap + xB.Exp(xB, big3, nil) + xB.Mul(xB, big2) + xB.Div(xB, ecbp1100PolynomialVXCap) + + // (3 * x**2 - 2 * x**3 // xcap) + out.Sub(xA, xB) + + // // (3 * x**2 - 2 * x**3 // xcap) * height + out.Mul(out, ecbp1100PolynomialVHeight) + + // xcap ** 2 + xcap2 := new(big.Int).Exp(ecbp1100PolynomialVXCap, big2, nil) + + // (3 * x**2 - 2 * x**3 // xcap) * height // xcap ** 2 + out.Div(out, xcap2) + + // CURVE_FUNCTION_DENOMINATOR + (3 * x**2 - 2 * x**3 // xcap) * height // xcap ** 2 + out.Add(out, ecbp1100PolynomialVCurveFunctionDenominator) + return out } -var ecbp1100PolynomialVCurveFunctionDenominator = int64(128) -var ecbp1100PolynomialVXCap = int64(25132) -var ecbp1100PolynomialVAmpl = int64(15) -var ecbp1100PolynomialVHeight = ecbp1100PolynomialVCurveFunctionDenominator * ecbp1100PolynomialVAmpl * 2 + +var big0 = big.NewInt(0) +var big2 = big.NewInt(2) +var big3 = big.NewInt(3) + +// ecbp1100PolynomialVCurveFunctionDenominator +// CURVE_FUNCTION_DENOMINATOR = 128 +var ecbp1100PolynomialVCurveFunctionDenominator = big.NewInt(128) + +// ecbp1100PolynomialVXCap +// xcap = 25132 # = floor(8000*pi) +var ecbp1100PolynomialVXCap = big.NewInt(25132) + +// ecbp1100PolynomialVAmpl +// ampl = 15 +var ecbp1100PolynomialVAmpl = big.NewInt(15) + +// ecbp1100PolynomialVHeight +// height = CURVE_FUNCTION_DENOMINATOR * (ampl * 2) +var ecbp1100PolynomialVHeight = new(big.Int).Mul(new(big.Int).Mul(ecbp1100PolynomialVCurveFunctionDenominator, ecbp1100PolynomialVAmpl), big2) + +// func ecbp1100PolynomialV(x int64) int64 { +// if x > ecbp1100PolynomialVXCap { +// x = ecbp1100PolynomialVXCap +// } +// return ecbp1100PolynomialVCurveFunctionDenominator + +// ((3 * emath.BigPow(int64(x), 2).Int64()) - (2 * emath.BigPow(int64(x), 3).Int64() / ecbp1100PolynomialVXCap)) * +// ecbp1100PolynomialVHeight / (emath.BigPow(ecbp1100PolynomialVXCap, 2).Int64()) +// } +// var ecbp1100PolynomialVCurveFunctionDenominator = int64(128) +// var ecbp1100PolynomialVXCap = int64(25132) +// var ecbp1100PolynomialVAmpl = int64(15) +// var ecbp1100PolynomialVHeight = ecbp1100PolynomialVCurveFunctionDenominator * ecbp1100PolynomialVAmpl * 2 /* ecbp1100AGSinusoidalA is a sinusoidal function. 
@@ -183,3 +259,4 @@ f(x)=1.0001^(x) func ecbp1100AGExpA(x float64) (antiGravity float64) { return math.Pow(1.0001, x) } + diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index 4365e92b08..76e4860247 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -5,6 +5,7 @@ import ( "image/color" "log" "math" + "math/big" "math/rand" "testing" @@ -55,6 +56,7 @@ func runMESSTest(t *testing.T, easyL, hardL, caN int, easyT, hardT int64) (hardH } func TestBlockChain_AF_ECBP1100(t *testing.T) { + t.Skip("These have been disused as of the sinusoidal -> cubic change.") yuckyGlobalTestEnableMess = true defer func() { yuckyGlobalTestEnableMess = false @@ -65,6 +67,7 @@ func TestBlockChain_AF_ECBP1100(t *testing.T) { easyOffset, hardOffset int64 hardGetsHead, accepted bool }{ + // INDEX=0 // Hard has insufficient total difficulty / length and is rejected. { 5000, 7500, 2500, @@ -95,6 +98,7 @@ func TestBlockChain_AF_ECBP1100(t *testing.T) { 0, -8, true, true, }, + // INDEX=5 // Hard has sufficient total difficulty / length and is accepted. { 500, 4, 496, @@ -125,6 +129,7 @@ func TestBlockChain_AF_ECBP1100(t *testing.T) { 0, -9, true, true, }, + // INDEX=10 // Hard has sufficient total difficulty / length and is accepted. { 500, 9, 491, @@ -155,6 +160,7 @@ func TestBlockChain_AF_ECBP1100(t *testing.T) { 0, -9, true, true, }, + // // INDEX=15 // Hard has insufficient total difficulty / length and is rejected. { 500, 250, 250, @@ -185,6 +191,7 @@ func TestBlockChain_AF_ECBP1100(t *testing.T) { 47, -8, false, false, }, + // // INDEX=20 // Hard has insufficient total difficulty / length and is rejected. { 500, 200, 300, @@ -216,6 +223,7 @@ func TestBlockChain_AF_ECBP1100(t *testing.T) { 60, -9, false, true, }, + // INDEX=25 // Hard is shorter, but sufficiently heavier chain, is accepted. { 500, 100, 390, @@ -235,6 +243,76 @@ func TestBlockChain_AF_ECBP1100(t *testing.T) { } } +func TestBlockChain_AF_ECBP1100_2(t *testing.T) { + yuckyGlobalTestEnableMess = true + defer func() { + yuckyGlobalTestEnableMess = false + }() + + cases := []struct { + easyLen, hardLen, commonAncestorN int + easyOffset, hardOffset int64 + hardGetsHead, accepted bool + }{ + // Random coin tosses involved for equivalent difficulty. 
+ // { + // 1000, 1, 999, + // 0, 0, // -1 offset => 10-1=9 same child difficulty + // false, true, + // }, + // { + // 1000, 3, 997, + // 0, 0, // -1 offset => 10-1=9 same child difficulty + // false, true, + // }, + // { + // 1000, 10, 990, + // 0, 0, // -1 offset => 10-1=9 same child difficulty + // false, true, + // }, + { + 1000, 1, 999, + 0, -2, // better difficulty + true, true, + }, + { + 1000, 25, 975, + 0, -2, // better difficulty + true, true, + }, + { + 1000, 30, 970, + 0, -2, // better difficulty + false, true, + }, + { + 1000, 50, 950, + 0, -5, + true, true, + }, + { + 1000, 50, 950, + 0, -1, + false, true, + }, + { + 1000, 999, 1, + 0, -9, + true, true, + }, + } + + for i, c := range cases { + hardHead, err := runMESSTest(t, c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset) + if (err != nil && c.accepted) || (err == nil && !c.accepted) || (hardHead != c.hardGetsHead) { + t.Errorf("case=%d [easy=%d hard=%d ca=%d eo=%d ho=%d] want.accepted=%v want.hardHead=%v got.hardHead=%v err=%v", + i, + c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset, + c.accepted, c.hardGetsHead, hardHead, err) + } + } +} + func TestBlockChain_GenerateMESSPlot(t *testing.T) { t.Skip("This test plots graph of chain acceptance for visualization.") @@ -331,7 +409,7 @@ func TestEcbp1100AGSinusoidalA(t *testing.T) { /* TestAFKnownBlock tests that AF functionality works for chain re-insertions. -Chain re-insertions use BlockChain.writeKnownBlock, where first-pass insertions +Chain re-insertions use BlockChain.writeKnownBlockAsHead, where first-pass insertions will hit writeBlockWithState. AF needs to be implemented at both sites to prevent re-proposed chains from sidestepping @@ -359,14 +437,466 @@ func TestAFKnownBlock(t *testing.T) { if err != nil { t.Fatal(err) } - hard, _ := GenerateChain(genesis.Config, easy[easyN-100], engine, db, 100, func(i int, gen *BlockGen) { + hard, _ := GenerateChain(genesis.Config, easy[easyN-300], engine, db, 300, func(i int, gen *BlockGen) { gen.OffsetTime(-7) }) - if _, err := chain.InsertChain(hard); err == nil { - t.Error("hard 1 inserted") + // writeBlockWithState + if _, err := chain.InsertChain(hard); err != nil { + t.Error("hard 1 not inserted (should be side)") + } + // writeKnownBlockAsHead + if _, err := chain.InsertChain(hard); err != nil { + t.Error("hard 2 inserted (will have 'ignored' known blocks, and never tried a reorg)") + } + hardHeadHash := hard[len(hard)-1].Hash() + if chain.CurrentBlock().Hash() == hardHeadHash { + t.Fatal("hard block got chain head, should be side") } - if _, err := chain.InsertChain(hard); err == nil { - t.Error("hard 2 inserted") + if h := chain.GetHeaderByHash(hardHeadHash); h == nil { + t.Fatal("missing hard block (should be imported as side, but still available)") } } +func TestPlot_ecbp1100PolynomialV(t *testing.T) { + p, err := plot.New() + if err != nil { + panic(err) + } + p.Title.Text = "Functions" + p.X.Label.Text = "X" + p.Y.Label.Text = "Y" + + poly := plotter.NewFunction(func(f float64) float64 { + n := big.NewInt(int64(f)) + y := ecbp1100PolynomialV(n) + ff, _ := new(big.Float).SetInt(y).Float64() + return ff + }) + p.Add(poly) + + p.X.Min = 0 + p.X.Max = 100000 + p.Y.Min = 0 + p.Y.Max = 5000 + + if err := p.Save(1000, 1000, "ecbp1100-polynomial.png"); err != nil { + t.Fatal(err) + } +} + +func TestEcbp1100PolynomialV(t *testing.T) { + t.Log( + ecbp1100PolynomialV(big.NewInt(99)), + ecbp1100PolynomialV(big.NewInt(999)), + ecbp1100PolynomialV(big.NewInt(99999))) +} + +func 
TestGenerateChainTargetingHashrate(t *testing.T) { + engine := ethash.NewFaker() + + db := rawdb.NewMemoryDatabase() + genesis := params.DefaultMessNetGenesisBlock() + // genesis.Timestamp = 1 + genesisB := MustCommitGenesis(db, genesis) + + chain, err := NewBlockChain(db, nil, genesis.Config, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatal(err) + } + defer chain.Stop() + chain.EnableArtificialFinality(true) + + easy, _ := GenerateChain(genesis.Config, genesisB, engine, db, 1000, func(i int, gen *BlockGen) { + gen.OffsetTime(0) + }) + if _, err := chain.InsertChain(easy); err != nil { + t.Fatal(err) + } + firstDifficulty := chain.CurrentHeader().Difficulty + targetDifficultyRatio := big.NewInt(2) + targetDifficulty := new(big.Int).Div(firstDifficulty, targetDifficultyRatio) + for chain.CurrentHeader().Difficulty.Cmp(targetDifficulty) > 0 { + next, _ := GenerateChain(genesis.Config, chain.CurrentBlock(), engine, db, 1, func(i int, gen *BlockGen) { + gen.OffsetTime(8) // 8: (=10+8=18>(13+4=17).. // minimum value over stable range + }) + if _, err := chain.InsertChain(next); err != nil { + t.Fatal(err) + } + } + t.Log(chain.CurrentBlock().Number()) +} + + +func TestBlockChain_AF_Difficulty_Develop(t *testing.T) { + t.Skip("Development version of tests with plotter") + // Generate the original common chain segment and the two competing forks + engine := ethash.NewFaker() + + db := rawdb.NewMemoryDatabase() + genesis := params.DefaultMessNetGenesisBlock() + // genesis.Timestamp = 1 + genesisB := MustCommitGenesis(db, genesis) + + chain, err := NewBlockChain(db, nil, genesis.Config, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatal(err) + } + defer chain.Stop() + chain.EnableArtificialFinality(true) + + cases := []struct { + easyLen, hardLen, commonAncestorN int + easyOffset, hardOffset int64 + hardGetsHead, accepted bool + }{ + // { + // 1000, 800, 200, + // 10, 1, + // true, true, + // }, + // { + // 1000, 800, 200, + // 60, 1, + // true, true, + // }, + // { + // 10000, 8000, 2000, + // 60, 1, + // true, true, + // }, + // { + // 20000, 18000, 2000, + // 10, 1, + // true, true, + // }, + // { + // 20000, 18000, 2000, + // 60, 1, + // true, true, + // }, + // { + // 10000, 8000, 2000, + // 10, 20, + // true, true, + // }, + + // { + // 1000, 1, 999, + // 10, 1, + // true, true, + // }, + // { + // 1000, 10, 990, + // 10, 1, + // true, true, + // }, + // { + // 1000, 100, 900, + // 10, 1, + // true, true, + // }, + // { + // 1000, 200, 800, + // 10, 1, + // true, true, + // }, + // { + // 1000, 500, 500, + // 10, 1, + // true, true, + // }, + // { + // 1000, 999, 1, + // 10, 1, + // true, true, + // }, + // { + // 5000, 4000, 1000, + // 10, 1, + // true, true, + // }, + + // { + // 10000, 9000, 1000, + // 10, 1, + // true, true, + // }, + // + // { + // 7000, 6500, 500, + // 10, 1, + // true, true, + // }, + + // { + // 100, 90, 10, + // 10, 1, + // true, true, + // }, + + // { + // 1000, 1, 999, + // 10, 1, + // true, true, + // }, + // { + // 1000, 2, 998, + // 10, 1, + // true, true, + // }, + // { + // 1000, 3, 997, + // 10, 1, + // true, true, + // }, + // { + // 1000, 1, 999, + // 10, 8, + // true, true, + // }, + + { + 1000, 50, 950, + 10, 9, + false, false, + }, + { + 1000, 100, 900, + 10, 8, + false, false, + }, + { + 1000, 100, 900, + 10, 7, + false, false, + }, + { + 1000, 50, 950, + 10, 5, + true, true, + }, + { + 1000, 50, 950, + 10, 3, + true, true, + }, + //5 + { + 1000, 100, 900, + 10, 3, + false, false, + }, + { + 1000, 200, 800, + 10, 3, + false, false, + 
}, + { + 1000, 200, 800, + 10, 1, + false, false, + }, + } + + // poissonTime := func(b *BlockGen, seconds int64) { + // poisson := distuv.Poisson{Lambda: float64(seconds)} + // r := poisson.Rand() + // if r < 1 { + // r = 1 + // } + // if r > float64(seconds) * 1.5 { + // r = float64(seconds) + // } + // chainreader := &fakeChainReader{config: b.config} + // b.header.Time = b.parent.Time() + uint64(r) + // b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, b.parent.Header()) + // for err := b.engine.VerifyHeader(chainreader, b.header, false); + // err != nil && err != consensus.ErrUnknownAncestor && b.header.Time > b.parent.Header().Time; { + // t.Log(err) + // r -= 1 + // b.header.Time = b.parent.Time() + uint64(r) + // b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, b.parent.Header()) + // } + // } + + type ratioComparison struct { + tdRatio float64 + penalty float64 + } + gotRatioComparisons := []ratioComparison{} + + for i, c := range cases { + + if err := chain.Reset(); err != nil { + t.Fatal(err) + } + easy, _ := GenerateChain(genesis.Config, genesisB, engine, db, c.easyLen, func(i int, b *BlockGen) { + b.SetNonce(types.EncodeNonce(uint64(rand.Int63n(math.MaxInt64)))) + // poissonTime(b, c.easyOffset) + b.OffsetTime(c.easyOffset - 10) + }) + commonAncestor := easy[c.commonAncestorN-1] + hard, _ := GenerateChain(genesis.Config, commonAncestor, engine, db, c.hardLen, func(i int, b *BlockGen) { + b.SetNonce(types.EncodeNonce(uint64(rand.Int63n(math.MaxInt64)))) + // poissonTime(b, c.hardOffset) + b.OffsetTime(c.hardOffset - 10) + }) + if _, err := chain.InsertChain(easy); err != nil { + t.Fatal(err) + } + n, err := chain.InsertChain(hard) + hardHead := chain.CurrentBlock().Hash() == hard[len(hard)-1].Hash() + + commons := plotter.XYs{} + easys := plotter.XYs{} + hards := plotter.XYs{} + tdrs := plotter.XYs{} + antigravities := plotter.XYs{} + antigravities2 := plotter.XYs{} + + balance := plotter.XYs{} + + for i := 0; i < c.easyLen; i++ { + td := chain.GetTd(easy[i].Hash(), easy[i].NumberU64()) + point := plotter.XY{X: float64(easy[i].NumberU64()), Y: float64(td.Uint64())} + if i <= c.commonAncestorN { + commons = append(commons, point) + } else { + easys = append(easys, point) + } + } + // td ratios + // for j := 0; j < c.hardLen; j++ { + for j := 0; j < n; j++ { + + td := chain.GetTd(hard[j].Hash(), hard[j].NumberU64()) + if td != nil { + point := plotter.XY{X: float64(hard[j].NumberU64()), Y: float64(td.Uint64())} + hards = append(hards, point) + } + + if commonAncestor.NumberU64() != uint64(c.commonAncestorN) { + t.Fatalf("bad test common=%d easy=%d can=%d", commonAncestor.NumberU64(), c.easyLen, c.commonAncestorN) + } + + ee := c.commonAncestorN + j + easyHeader := easy[ee].Header() + hardHeader := hard[j].Header() + if easyHeader.Number.Uint64() != hardHeader.Number.Uint64() { + t.Fatalf("bad test easyheader=%d hardheader=%d", easyHeader.Number.Uint64(), hardHeader.Number.Uint64()) + } + + /* + HERE LIES THE RUB (IN MY GRAPHS). 
+ + + */ + // y := chain.getTDRatio(commonAncestor.Header(), easyHeader, hardHeader) // <- unit x unit + + // y := chain.getTDRatio(commonAncestor.Header(), easy[c.easyLen-1].Header(), hardHeader) + + y := chain.getTDRatio(commonAncestor.Header(), chain.CurrentHeader(), hardHeader) + + if j == 0 { + t.Logf("case=%d first.hard.tdr=%v", i, y) + } + + ecbp := ecbp1100AGSinusoidalA(float64(hardHeader.Time - commonAncestor.Header().Time)) + + if j == n-1 { + gotRatioComparisons = append(gotRatioComparisons, ratioComparison{ + tdRatio: y, penalty: ecbp, + }) + } + + // Exploring alternative penalty functions. + ecbp2 := ecbp1100AGExpA(float64(hardHeader.Time - commonAncestor.Header().Time)) + // t.Log(y, ecbp, ecbp2) + + tdrs = append(tdrs, plotter.XY{X: float64(hard[j].NumberU64()), Y: y}) + antigravities = append(antigravities, plotter.XY{X: float64(hard[j].NumberU64()), Y: ecbp}) + antigravities2 = append(antigravities2, plotter.XY{X: float64(hard[j].NumberU64()), Y: ecbp2}) + + balance = append(balance, plotter.XY{X: float64(hardHeader.Number.Uint64()), Y: y - ecbp}) + } + scatterCommons, _ := plotter.NewScatter(commons) + scatterEasys, _ := plotter.NewScatter(easys) + scatterHards, _ := plotter.NewScatter(hards) + + scatterTDRs, _ := plotter.NewScatter(tdrs) + scatterAntigravities, _ := plotter.NewScatter(antigravities) + scatterAntigravities2, _ := plotter.NewScatter(antigravities2) + balanceScatter, _ := plotter.NewScatter(balance) + + scatterCommons.Color = color.RGBA{R: 190, G: 197, B: 236, A: 255} + scatterCommons.Shape = draw.CircleGlyph{} + scatterCommons.Radius = 2 + scatterEasys.Color = color.RGBA{R: 152, G: 236, B: 161, A: 255} // green + scatterEasys.Shape = draw.CircleGlyph{} + scatterEasys.Radius = 2 + scatterHards.Color = color.RGBA{R: 236, G: 106, B: 94, A: 255} + scatterHards.Shape = draw.CircleGlyph{} + scatterHards.Radius = 2 + + p, perr := plot.New() + if perr != nil { + log.Panic(perr) + } + p.Add(scatterCommons) + p.Legend.Add("Commons", scatterCommons) + p.Add(scatterEasys) + p.Legend.Add("Easys", scatterEasys) + p.Add(scatterHards) + p.Legend.Add("Hards", scatterHards) + p.Title.Text = fmt.Sprintf("TD easy=%d hard=%d", c.easyOffset, c.hardOffset) + p.Save(1000, 600, fmt.Sprintf("plot-td-%d-%d-%d-%d-%d.png", c.easyLen, c.commonAncestorN, c.hardLen, c.easyOffset, c.hardOffset)) + + p, perr = plot.New() + if perr != nil { + log.Panic(perr) + } + + scatterTDRs.Color = color.RGBA{R: 236, G: 106, B: 94, A: 255} // red + scatterTDRs.Radius = 3 + scatterTDRs.Shape = draw.PyramidGlyph{} + p.Add(scatterTDRs) + p.Legend.Add("TD Ratio", scatterTDRs) + + scatterAntigravities.Color = color.RGBA{R: 190, G: 197, B: 236, A: 255} // blue + scatterAntigravities.Radius = 3 + scatterAntigravities.Shape = draw.PlusGlyph{} + p.Add(scatterAntigravities) + p.Legend.Add("(Anti)Gravity Penalty", scatterAntigravities) + + scatterAntigravities2.Color = color.RGBA{R: 152, G: 236, B: 161, A: 255} // green + scatterAntigravities2.Radius = 3 + scatterAntigravities2.Shape = draw.PlusGlyph{} + // p.Add(scatterAntigravities2) + // p.Legend.Add("(Anti)Gravity Penalty (Alternate)", scatterAntigravities2) + + p.Title.Text = fmt.Sprintf("TD Ratio easy=%d hard=%d", c.easyOffset, c.hardOffset) + p.Save(1000, 600, fmt.Sprintf("plot-td-ratio-%d-%d-%d-%d-%d.png", c.easyLen, c.commonAncestorN, c.hardLen, c.easyOffset, c.hardOffset)) + + p, perr = plot.New() + if perr != nil { + log.Panic(perr) + } + p.Title.Text = fmt.Sprintf("TD Ratio - Antigravity Penalty easy=%d hard=%d", c.easyOffset, c.hardOffset) + 
balanceScatter.Color = color.RGBA{R: 235, G: 92, B: 236, A: 255} // purple + balanceScatter.Radius = 3 + balanceScatter.Shape = draw.PlusGlyph{} + p.Add(balanceScatter) + p.Legend.Add("TDR - Penalty", balanceScatter) + p.Save(1000, 600, fmt.Sprintf("plot-td-ratio-diff-%d-%d-%d-%d-%d.png", c.easyLen, c.commonAncestorN, c.hardLen, c.easyOffset, c.hardOffset)) + + if (err != nil && c.accepted) || (err == nil && !c.accepted) || (hardHead != c.hardGetsHead) { + compared := gotRatioComparisons[i] + t.Errorf(`case=%d [easy=%d hard=%d ca=%d eo=%d ho=%d] want.accepted=%v want.hardHead=%v got.hardHead=%v err=%v +got.tdr=%v got.pen=%v`, + i, + c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset, + c.accepted, c.hardGetsHead, hardHead, err, compared.tdRatio, compared.penalty) + } + } + +} From 056c0fc588c1667e35d45ffdeda39de69420bd57 Mon Sep 17 00:00:00 2001 From: meows Date: Wed, 23 Sep 2020 07:14:07 -0500 Subject: [PATCH 082/105] core: fix merge-missed minerOwn condition in AF arbiter Signed-off-by: meows --- core/blockchain.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/core/blockchain.go b/core/blockchain.go index a644d90e1a..e19e057a56 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1555,7 +1555,12 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. if d.err == nil { // Reorg data error was nil. // Proceed with further reorg arbitration. - if bc.IsArtificialFinalityEnabled() && bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, currentBlock.Number()) { + // If the node is mining and trying to insert their own block, we want to allow that (do not override miners). + minerOwn := bc.shouldPreserve != nil && bc.shouldPreserve(block) + if (bc.shouldPreserve == nil || !minerOwn) && + bc.IsArtificialFinalityEnabled() && + bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, currentBlock.Number()) { + if err := bc.ecbp1100(d.commonBlock.Header(), currentBlock.Header(), block.Header()); err != nil { log.Warn("Reorg disallowed", "error", err) canonicalDisallowed = true From 93381aecbc670522676074d48b593e6c7e0a156f Mon Sep 17 00:00:00 2001 From: meows Date: Wed, 23 Sep 2020 07:16:46 -0500 Subject: [PATCH 083/105] core: fix AF logging to be less talky Also improves a graph generated by a test. This commit was created by gitcheckouting development commit 01406c762f. Signed-off-by: meows --- core/blockchain.go | 35 ++++++++++++++++++++++++++++------- core/blockchain_af.go | 17 ++++------------- core/blockchain_af_test.go | 9 ++++++--- 3 files changed, 38 insertions(+), 23 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index e19e057a56..35bb930343 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1562,12 +1562,26 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, currentBlock.Number()) { if err := bc.ecbp1100(d.commonBlock.Header(), currentBlock.Header(), block.Header()); err != nil { - log.Warn("Reorg disallowed", "error", err) + canonicalDisallowed = true + log.Warn("Reorg disallowed", "error", err) + + } else if len(d.oldChain) > 3 { + + // Reorg is allowed, only log the MESS line if old chain is longer than normal. 
+ log.Info("ECBP1100-MESS 🔓", + "status", "accepted", + "age", common.PrettyAge(time.Unix(int64(d.commonBlock.Time()), 0)), + "current.span", common.PrettyDuration(time.Duration(currentBlock.Time()-d.commonBlock.Time())*time.Second), + "proposed.span", common.PrettyDuration(time.Duration(int32(block.Time()))*time.Second), + "common.bno", d.commonBlock.Number().Uint64(), "common.hash", d.commonBlock.Hash(), + "current.bno", currentBlock.Number().Uint64(), "current.hash", currentBlock.Hash(), + "proposed.bno", block.Number().Uint64(), "proposed.hash", block.Hash(), + ) } } } - // If there is an error, we leave it to the reorg method to handle, if it wants to wrap it or log it or whatever. + // If there was a reorg(data) error, we leave it to the reorg method to handle, if it wants to wrap it or log it or whatever. if !canonicalDisallowed { if err := bc.reorg(d); err != nil { return NonStatTy, err @@ -1722,7 +1736,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil ) for block != nil && err == ErrKnownBlock { - finalityDisallowed := false + + // canonicalDisallowed is set to true if the total difficulty is greater than + // our local head, but the segment fails to meet the criteria required by any artificial finality features, + // namely that it requires a reorg (parent != current) and does not meet an inflated difficulty ratio. + canonicalDisallowed := false + externTd = new(big.Int).Add(externTd, block.Difficulty()) if localTd.Cmp(externTd) < 0 { // Have found a known block with GREATER THAN local total difficulty. @@ -1747,16 +1766,18 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, current.Number()) { if err := bc.ecbp1100(reorgData.commonBlock.Header(), current.Header(), block.Header()); err != nil { - log.Warn("Reorg disallowed", "error", err) - finalityDisallowed = true + + canonicalDisallowed = true + log.Trace("Reorg disallowed", "error", err) + } } } } - if !finalityDisallowed { + if !canonicalDisallowed { break } - // finalityDisallowed == true + // canonicalDisallowed == true // Total difficulty was greater, but that condition has been overridden by the artificial // finality check. Continue like nothing happened. 
} diff --git a/core/blockchain_af.go b/core/blockchain_af.go index d34cee22f3..b21ebb8918 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -98,26 +98,17 @@ func (bc *BlockChain) ecbp1100(commonAncestor, current, proposed *types.Header) ).Float64() if got.Cmp(want) < 0 { - return fmt.Errorf(`%w: ECBP1100-MESS 🔒 status=rejected age=%v current.span=%v proposed.span=%v common.bno=%d current.bno=%d proposed.bno=%d tdr/gravity=%0.6f`, + return fmt.Errorf(`%w: ECBP1100-MESS 🔒 status=rejected age=%v current.span=%v proposed.span=%v tdr/gravity=%0.6f common.bno=%d common.hash=%s current.bno=%d current.hash=%s proposed.bno=%d proposed.hash=%s`, errReorgFinality, common.PrettyAge(time.Unix(int64(commonAncestor.Time), 0)), common.PrettyDuration(time.Duration(current.Time - commonAncestor.Time)*time.Second), common.PrettyDuration(time.Duration(int32(xBig.Uint64()))*time.Second), - commonAncestor.Number.Uint64(), - current.Number.Uint64(), proposed.Number.Uint64(), prettyRatio, + commonAncestor.Number.Uint64(), commonAncestor.Hash().Hex(), + current.Number.Uint64(), current.Hash().Hex(), + proposed.Number.Uint64(), proposed.Hash().Hex(), ) } - log.Info("ECBP1100-MESS 🔓", - "status", "accepted", - "age", common.PrettyAge(time.Unix(int64(commonAncestor.Time), 0)), - "current.span", common.PrettyDuration(time.Duration(current.Time - commonAncestor.Time)*time.Second), - "proposed.span", common.PrettyDuration(time.Duration(int32(xBig.Uint64()))*time.Second), - "common.bno", commonAncestor.Number.Uint64(), - "current.bno", current.Number.Uint64(), - "proposed.bno", proposed.Number.Uint64(), - "tdr/gravity", prettyRatio, - ) return nil } diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index 76e4860247..aa4f8be493 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -314,7 +314,7 @@ func TestBlockChain_AF_ECBP1100_2(t *testing.T) { } func TestBlockChain_GenerateMESSPlot(t *testing.T) { - t.Skip("This test plots graph of chain acceptance for visualization.") + // t.Skip("This test plots graph of chain acceptance for visualization.") easyLen := 200 maxHardLen := 100 @@ -462,7 +462,7 @@ func TestPlot_ecbp1100PolynomialV(t *testing.T) { if err != nil { panic(err) } - p.Title.Text = "Functions" + p.Title.Text = "ECBP1100 Polynomial Curve Function" p.X.Label.Text = "X" p.Y.Label.Text = "Y" @@ -475,10 +475,13 @@ func TestPlot_ecbp1100PolynomialV(t *testing.T) { p.Add(poly) p.X.Min = 0 - p.X.Max = 100000 + p.X.Max = 30000 p.Y.Min = 0 p.Y.Max = 5000 + p.Y.Label.Text = "Antigravity imposition" + p.X.Label.Text = "Seconds difference between local head and proposed common ancestor" + if err := p.Save(1000, 1000, "ecbp1100-polynomial.png"); err != nil { t.Fatal(err) } From 5ef64c2a249153c5af7a33984ff67c07e7e06917 Mon Sep 17 00:00:00 2001 From: meows Date: Wed, 23 Sep 2020 07:25:44 -0500 Subject: [PATCH 084/105] core: lowbump logging of MESS-approval to >2 segments Signed-off-by: meows --- core/blockchain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/blockchain.go b/core/blockchain.go index 35bb930343..d0f1963331 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1566,7 +1566,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. canonicalDisallowed = true log.Warn("Reorg disallowed", "error", err) - } else if len(d.oldChain) > 3 { + } else if len(d.oldChain) > 2 { // Reorg is allowed, only log the MESS line if old chain is longer than normal. 
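One way to back up the claim that both versions compute the same values, sketched as a hypothetical cross-check (not part of this change): compare the two implementations over a handful of inputs. For non-negative x they should agree exactly, since the int64 arithmetic stays well inside int64 range once x is capped.

    func TestEcbp1100PolynomialVAgreement(t *testing.T) {
        for _, x := range []int64{0, 1, 100, 12566, 25132, 30000} {
            want := ecbp1100PolynomialVI64(x)
            got := ecbp1100PolynomialV(big.NewInt(x))
            if !got.IsInt64() || got.Int64() != want {
                t.Errorf("x=%d: big.Int=%v int64=%d", x, got, want)
            }
        }
    }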
log.Info("ECBP1100-MESS 🔓", From 6c01dad2c4e49a64d00ebd426ccc67b45536ddf0 Mon Sep 17 00:00:00 2001 From: meows Date: Wed, 23 Sep 2020 07:31:39 -0500 Subject: [PATCH 085/105] core: skip development and graphing tests Signed-off-by: meows --- core/blockchain_af_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index aa4f8be493..6c1c765d80 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -314,7 +314,7 @@ func TestBlockChain_AF_ECBP1100_2(t *testing.T) { } func TestBlockChain_GenerateMESSPlot(t *testing.T) { - // t.Skip("This test plots graph of chain acceptance for visualization.") + t.Skip("This test plots graph of chain acceptance for visualization.") easyLen := 200 maxHardLen := 100 @@ -458,6 +458,7 @@ func TestAFKnownBlock(t *testing.T) { } func TestPlot_ecbp1100PolynomialV(t *testing.T) { + t.Skip("This test plots a graph of the ECBP1100 polynomial curve.") p, err := plot.New() if err != nil { panic(err) @@ -495,6 +496,7 @@ func TestEcbp1100PolynomialV(t *testing.T) { } func TestGenerateChainTargetingHashrate(t *testing.T) { + t.Skip("A development test to play with difficulty steps.") engine := ethash.NewFaker() db := rawdb.NewMemoryDatabase() From a294c66a7cdcc5c51325f305e2b67da7ad34f1d2 Mon Sep 17 00:00:00 2001 From: meows Date: Wed, 23 Sep 2020 07:33:20 -0500 Subject: [PATCH 086/105] core: add more illustrative tests for diffic/length scaling Signed-off-by: meows --- core/blockchain_af_test.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index 6c1c765d80..1a9f0b83ca 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -300,6 +300,32 @@ func TestBlockChain_AF_ECBP1100_2(t *testing.T) { 0, -9, true, true, }, + { + 1000, 999, 1, + 0, -8, + false, true, + }, + { + 1000, 500, 500, + 0, -8, + true, true, + }, + { + 1000, 500, 500, + 0, -7, + false, true, + }, + { + 1000, 300, 700, + 0, -7, + false, true, + }, + // Will pass, takes a long time. + // { + // 5000, 4000, 1000, + // 0, -9, + // true, true, + // }, } for i, c := range cases { From 131ddf5a267651ca0410a3213cecbd2ecc02cb52 Mon Sep 17 00:00:00 2001 From: meows Date: Wed, 23 Sep 2020 07:35:47 -0500 Subject: [PATCH 087/105] params: install classic ECBP1100 activation 11377500 Signed-off-by: meows --- params/config_classic.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/config_classic.go b/params/config_classic.go index 25ea00625d..904a845400 100644 --- a/params/config_classic.go +++ b/params/config_classic.go @@ -75,7 +75,7 @@ var ( ECIP1017EraRounds: big.NewInt(5000000), ECIP1010PauseBlock: big.NewInt(3000000), ECIP1010Length: big.NewInt(2000000), - ECBP1100FBlock: nil, // big.NewInt(11295300), // Tentative: ETA 28 Sept 2020, ~1500 UTC + ECBP1100FBlock: big.NewInt(11_377_500), // ETA 30 Sept 2020 RequireBlockHashes: map[uint64]common.Hash{ 1920000: common.HexToHash("0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f"), 2500000: common.HexToHash("0xca12c63534f565899681965528d536c52cb05b7c48e269c2a6cb77ad864d878a"), From 60b28313dd7a105dc83bc698a4512203be2d2767 Mon Sep 17 00:00:00 2001 From: meows Date: Wed, 23 Sep 2020 08:10:38 -0500 Subject: [PATCH 088/105] core: uncomment int64 version of polynomialv fn This is just for documentation and reference, doing the sam e math but with different var types. 
Signed-off-by: meows --- core/blockchain_af.go | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index b21ebb8918..1f3839e1e1 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -193,18 +193,21 @@ var ecbp1100PolynomialVAmpl = big.NewInt(15) // height = CURVE_FUNCTION_DENOMINATOR * (ampl * 2) var ecbp1100PolynomialVHeight = new(big.Int).Mul(new(big.Int).Mul(ecbp1100PolynomialVCurveFunctionDenominator, ecbp1100PolynomialVAmpl), big2) -// func ecbp1100PolynomialV(x int64) int64 { -// if x > ecbp1100PolynomialVXCap { -// x = ecbp1100PolynomialVXCap -// } -// return ecbp1100PolynomialVCurveFunctionDenominator + -// ((3 * emath.BigPow(int64(x), 2).Int64()) - (2 * emath.BigPow(int64(x), 3).Int64() / ecbp1100PolynomialVXCap)) * -// ecbp1100PolynomialVHeight / (emath.BigPow(ecbp1100PolynomialVXCap, 2).Int64()) -// } -// var ecbp1100PolynomialVCurveFunctionDenominator = int64(128) -// var ecbp1100PolynomialVXCap = int64(25132) -// var ecbp1100PolynomialVAmpl = int64(15) -// var ecbp1100PolynomialVHeight = ecbp1100PolynomialVCurveFunctionDenominator * ecbp1100PolynomialVAmpl * 2 +/* +ecbp1100PolynomialVI64 is an int64 implementation of ecbp1100PolynomialV. + */ +func ecbp1100PolynomialVI64(x int64) int64 { + if x > ecbp1100PolynomialVXCapI64 { + x = ecbp1100PolynomialVXCapI64 + } + return ecbp1100PolynomialVCurveFunctionDenominatorI64 + + ((3 * emath.BigPow(int64(x), 2).Int64()) - (2 * emath.BigPow(int64(x), 3).Int64() / ecbp1100PolynomialVXCapI64)) * + ecbp1100PolynomialVHeightI64 / (emath.BigPow(ecbp1100PolynomialVXCapI64, 2).Int64()) +} +var ecbp1100PolynomialVCurveFunctionDenominatorI64 = int64(128) +var ecbp1100PolynomialVXCapI64 = int64(25132) +var ecbp1100PolynomialVAmplI64 = int64(15) +var ecbp1100PolynomialVHeightI64 = ecbp1100PolynomialVCurveFunctionDenominatorI64 * ecbp1100PolynomialVAmplI64 * 2 /* ecbp1100AGSinusoidalA is a sinusoidal function. From 1e7f628fc0523477ebfbde664f9558fb6bfd54d4 Mon Sep 17 00:00:00 2001 From: meows Date: Wed, 23 Sep 2020 08:28:35 -0500 Subject: [PATCH 089/105] core: (lint) goimports -w, unnecessary conversions Signed-off-by: meows --- core/blockchain_af.go | 11 +++++------ core/blockchain_af_test.go | 3 +-- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index 1f3839e1e1..90670df25a 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -101,7 +101,7 @@ func (bc *BlockChain) ecbp1100(commonAncestor, current, proposed *types.Header) return fmt.Errorf(`%w: ECBP1100-MESS 🔒 status=rejected age=%v current.span=%v proposed.span=%v tdr/gravity=%0.6f common.bno=%d common.hash=%s current.bno=%d current.hash=%s proposed.bno=%d proposed.hash=%s`, errReorgFinality, common.PrettyAge(time.Unix(int64(commonAncestor.Time), 0)), - common.PrettyDuration(time.Duration(current.Time - commonAncestor.Time)*time.Second), + common.PrettyDuration(time.Duration(current.Time-commonAncestor.Time)*time.Second), common.PrettyDuration(time.Duration(int32(xBig.Uint64()))*time.Second), prettyRatio, commonAncestor.Number.Uint64(), commonAncestor.Hash().Hex(), @@ -173,7 +173,6 @@ func ecbp1100PolynomialV(x *big.Int) *big.Int { return out } -var big0 = big.NewInt(0) var big2 = big.NewInt(2) var big3 = big.NewInt(3) @@ -195,15 +194,16 @@ var ecbp1100PolynomialVHeight = new(big.Int).Mul(new(big.Int).Mul(ecbp1100Polyno /* ecbp1100PolynomialVI64 is an int64 implementation of ecbp1100PolynomialV. 
- */ +*/ func ecbp1100PolynomialVI64(x int64) int64 { if x > ecbp1100PolynomialVXCapI64 { x = ecbp1100PolynomialVXCapI64 } return ecbp1100PolynomialVCurveFunctionDenominatorI64 + - ((3 * emath.BigPow(int64(x), 2).Int64()) - (2 * emath.BigPow(int64(x), 3).Int64() / ecbp1100PolynomialVXCapI64)) * - ecbp1100PolynomialVHeightI64 / (emath.BigPow(ecbp1100PolynomialVXCapI64, 2).Int64()) + ((3*emath.BigPow(x, 2).Int64())-(2*emath.BigPow(x, 3).Int64()/ecbp1100PolynomialVXCapI64))* + ecbp1100PolynomialVHeightI64/(emath.BigPow(ecbp1100PolynomialVXCapI64, 2).Int64()) } + var ecbp1100PolynomialVCurveFunctionDenominatorI64 = int64(128) var ecbp1100PolynomialVXCapI64 = int64(25132) var ecbp1100PolynomialVAmplI64 = int64(15) @@ -253,4 +253,3 @@ f(x)=1.0001^(x) func ecbp1100AGExpA(x float64) (antiGravity float64) { return math.Pow(1.0001, x) } - diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index 1a9f0b83ca..bfa8988fb0 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -440,7 +440,7 @@ will hit writeBlockWithState. AF needs to be implemented at both sites to prevent re-proposed chains from sidestepping the AF criteria. - */ +*/ func TestAFKnownBlock(t *testing.T) { engine := ethash.NewFaker() @@ -557,7 +557,6 @@ func TestGenerateChainTargetingHashrate(t *testing.T) { t.Log(chain.CurrentBlock().Number()) } - func TestBlockChain_AF_Difficulty_Develop(t *testing.T) { t.Skip("Development version of tests with plotter") // Generate the original common chain segment and the two competing forks From ce11ef9e310bda744f948cbac457d4b4efc2b845 Mon Sep 17 00:00:00 2001 From: meows Date: Wed, 23 Sep 2020 10:20:19 -0500 Subject: [PATCH 090/105] core: refactor minerOwn logic to be simpler Signed-off-by: meows --- core/blockchain.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index d0f1963331..24492bf13f 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1557,7 +1557,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. // Proceed with further reorg arbitration. // If the node is mining and trying to insert their own block, we want to allow that (do not override miners). minerOwn := bc.shouldPreserve != nil && bc.shouldPreserve(block) - if (bc.shouldPreserve == nil || !minerOwn) && + if !minerOwn && bc.IsArtificialFinalityEnabled() && bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, currentBlock.Number()) { @@ -1761,7 +1761,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er // Check if artificial finality forbids the reorganization, // effectively overriding the simple (original) TD comparison check. minerOwn := bc.shouldPreserve != nil && bc.shouldPreserve(block) - if (bc.shouldPreserve == nil || !minerOwn) && + if !minerOwn && bc.IsArtificialFinalityEnabled() && bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, current.Number()) { From 98b9defc4ed2eb8162b47d50e99aa245cd3a7b73 Mon Sep 17 00:00:00 2001 From: meows Date: Wed, 23 Sep 2020 13:46:46 -0500 Subject: [PATCH 091/105] core,core/reorgs-MESS.png: use current time instead of proposed time for curve domain This drastically changes the canonical allowance, truncating it to a hard about 30 blocks. 
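The substance of this change is a single assignment in ecbp1100, visible in the diff below. A small sketch restating it, with one reading of the motivation in the comments (the helper name is illustrative and not part of the patch):

    // curveDomainSeconds is a hypothetical helper showing only the domain swap.
    // Previously the curve was evaluated over proposed.Time - commonAncestor.Time,
    // a span the producer of the proposed segment sets through its own timestamps.
    // Now it is evaluated over the span below, which is fixed by the locally
    // built chain and cannot be compressed by a competing proposal.
    func curveDomainSeconds(commonAncestor, current, proposed *types.Header) int64 {
        return int64(current.Time - commonAncestor.Time)
    }

The practical effect, as described above, is a much tighter window in which a competing segment can still displace the local chain.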
Signed-off-by: meows --- core/blockchain_af.go | 2 +- core/blockchain_af_test.go | 10 +++++----- core/reorgs-MESS.png | Bin 0 -> 45031 bytes 3 files changed, 6 insertions(+), 6 deletions(-) create mode 100644 core/reorgs-MESS.png diff --git a/core/blockchain_af.go b/core/blockchain_af.go index 90670df25a..0c5abee7e7 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -86,7 +86,7 @@ func (bc *BlockChain) ecbp1100(commonAncestor, current, proposed *types.Header) proposedSubchainTD := new(big.Int).Sub(proposedTD, commonAncestorTD) localSubchainTD := new(big.Int).Sub(localTD, commonAncestorTD) - xBig := big.NewInt(int64(proposed.Time - commonAncestor.Time)) + xBig := big.NewInt(int64(current.Time - commonAncestor.Time)) eq := ecbp1100PolynomialV(xBig) want := eq.Mul(eq, localSubchainTD) diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index bfa8988fb0..82609374e5 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -340,10 +340,10 @@ func TestBlockChain_AF_ECBP1100_2(t *testing.T) { } func TestBlockChain_GenerateMESSPlot(t *testing.T) { - t.Skip("This test plots graph of chain acceptance for visualization.") + // t.Skip("This test plots graph of chain acceptance for visualization.") - easyLen := 200 - maxHardLen := 100 + easyLen := 500 + maxHardLen := 400 generatePlot := func(title, fileName string) { p, err := plot.New() @@ -352,7 +352,7 @@ func TestBlockChain_GenerateMESSPlot(t *testing.T) { } p.Title.Text = title p.X.Label.Text = "Block Depth" - p.Y.Label.Text = "Relative Block Time Delta (10 seconds + y)" + p.Y.Label.Text = "Mode Block Time Offset (10 seconds + y)" accepteds := plotter.XYs{} rejecteds := plotter.XYs{} @@ -414,7 +414,7 @@ func TestBlockChain_GenerateMESSPlot(t *testing.T) { baseTitle := fmt.Sprintf("Accept/Reject Reorgs: Relative Time (Difficulty) over Proposed Segment Length (%d-block original chain)", easyLen) generatePlot(baseTitle, "reorgs-MESS.png") yuckyGlobalTestEnableMess = false - generatePlot("WITHOUT MESS: "+baseTitle, "reorgs-noMESS.png") + // generatePlot("WITHOUT MESS: "+baseTitle, "reorgs-noMESS.png") } func TestEcbp1100AGSinusoidalA(t *testing.T) { diff --git a/core/reorgs-MESS.png b/core/reorgs-MESS.png new file mode 100644 index 0000000000000000000000000000000000000000..ddee1dd59436c9a8680b1cb2805aa6c6a0b7b484 GIT binary patch literal 45031 (binary PNG data omitted)
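With PATCH 091 the curve domain x fed to the antigravity polynomial is the local chain's age since the common ancestor (current.Time - commonAncestor.Time) rather than the proposed segment's span. The following standalone Go sketch (not part of the patch series) evaluates the int64 polynomial introduced earlier in the series at a few offsets, to show how the antigravity factor grows over that domain; the constants mirror the *I64 variables from core/blockchain_af.go, while main() and the sample offsets are illustrative additions.

package main

import "fmt"

// Constants mirroring the I64 variables from core/blockchain_af.go.
var (
	denominator = int64(128)             // ecbp1100PolynomialVCurveFunctionDenominatorI64
	xCap        = int64(25132)           // ecbp1100PolynomialVXCapI64
	ampl        = int64(15)              // ecbp1100PolynomialVAmplI64
	height      = denominator * ampl * 2 // ecbp1100PolynomialVHeightI64
)

// polynomialV mirrors ecbp1100PolynomialVI64: a cubic that eases from
// denominator at x=0 up to denominator*(1+2*ampl) once x reaches the cap.
func polynomialV(x int64) int64 {
	if x > xCap {
		x = xCap
	}
	return denominator + ((3*x*x)-(2*x*x*x/xCap))*height/(xCap*xCap)
}

func main() {
	// x is the curve domain: seconds between the common ancestor and the
	// current (local) head. polynomialV(x)/denominator is the antigravity
	// factor that the big.Int version multiplies into localSubchainTD.
	for _, x := range []int64{0, 600, 3600, 7200, 25132} {
		fmt.Printf("x=%6ds  antigravity=%.3f\n", x, float64(polynomialV(x))/float64(denominator))
	}
}

Evaluated this way the factor starts at 1.0 and saturates at 31.0 (1 + 2*ampl) at the 25132-second cap; since the big.Int version above multiplies it into localSubchainTD before the comparison, a proposed segment has to clear a progressively higher total-difficulty bar the longer the local chain has been building on top of the common ancestor.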
z9`j?F5k~YRp||5PKNM}Zk`fZxi&4_TOv(M$V^j!8nxMZoFOBo`_pgWbRCDuo(|mUZ zgQz{fehs1e)E((H;J%EG_-9N~T)eA2esc^ldR23L8Cl(z(9qD+^F1qe zB}MDyiPEMx69tc1qHAEoyf*?t+WGU~2*eh{`18;Z^DfE{FlyW6T!NTPa4bSVuCPy+ zV`CRoUVM4?u2s;>h~Iu2LNYVE(r9d9t(B&vsMsH)k7c$fHaV{ZeB6 zi?C&hfdL(f5W9Dc5u*8(QYmt$h)vuIEY#M*Qyky{!EtOJBvUt3!XNPnz76Nq2W)U>ayO%t?+ zkx{7Nej3W}t)j$eL{)egpc@Vqg8~EJy?>9L5EytB%eSoTytNuw84z#n?eEbYjwuV2 zjm=#Z`)mHN`T{v#aOt86+ed^#MH1;7S}6wy1_JWQ;+?BlbS*~*2bnb685meVnggLu z+5!%AmuVVQ6)2(RJP=4ZuUuG+W#z-*?-#rqqc0 zf(g)J>(;G%nMX;rE}&7ZWL)^GsPjy{mc&_%ziwQhkQ8TFyU4wq>xjY&H4oTnmoPj@{eKEj(iW3v7T-+W-In literal 0 HcmV?d00001 From 908be72847e96ef0fa38fe8a90835122acc69f33 Mon Sep 17 00:00:00 2001 From: meows Date: Wed, 23 Sep 2020 16:11:59 -0500 Subject: [PATCH 092/105] core: remove minerOwn logic from MESS arbitration This would present an exploit where the adversary would simply use the etherbase(s) of powerful honest miners to get them to permit the reorg. Signed-off-by: meows --- core/blockchain.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 24492bf13f..1ec95526ea 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1556,9 +1556,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. // Reorg data error was nil. // Proceed with further reorg arbitration. // If the node is mining and trying to insert their own block, we want to allow that (do not override miners). - minerOwn := bc.shouldPreserve != nil && bc.shouldPreserve(block) - if !minerOwn && - bc.IsArtificialFinalityEnabled() && + if bc.IsArtificialFinalityEnabled() && bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, currentBlock.Number()) { if err := bc.ecbp1100(d.commonBlock.Header(), currentBlock.Header(), block.Header()); err != nil { @@ -1760,9 +1758,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er // It will. That means we are on a different chain currently. // Check if artificial finality forbids the reorganization, // effectively overriding the simple (original) TD comparison check. 
- minerOwn := bc.shouldPreserve != nil && bc.shouldPreserve(block) - if !minerOwn && - bc.IsArtificialFinalityEnabled() && + + if bc.IsArtificialFinalityEnabled() && bc.chainConfig.IsEnabled(bc.chainConfig.GetECBP1100Transition, current.Number()) { if err := bc.ecbp1100(reorgData.commonBlock.Header(), current.Header(), block.Header()); err != nil { From 0cb3d6851abf20daf884bff407f32a9ee70f0a31 Mon Sep 17 00:00:00 2001 From: meows Date: Wed, 23 Sep 2020 16:24:25 -0500 Subject: [PATCH 093/105] core/reorgs-MESS.png: remove graph file accidentally committed Signed-off-by: meows --- core/reorgs-MESS.png | Bin 45031 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 core/reorgs-MESS.png diff --git a/core/reorgs-MESS.png b/core/reorgs-MESS.png deleted file mode 100644 index ddee1dd59436c9a8680b1cb2805aa6c6a0b7b484..0000000000000000000000000000000000000000 GIT binary patch (binary PNG data omitted)
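The guard removed in PATCH 092 above keyed MESS arbitration off bc.shouldPreserve(block), which (per the deleted comment and the commit message) treats a block whose author matches a local etherbase as the node's own; an adversary can satisfy that check simply by mining with an honest miner's etherbase. Below is a minimal, self-contained sketch of the guard's shape before and after the change. The boolean parameters are illustrative stand-ins for shouldPreserve, the combined IsArtificialFinalityEnabled/GetECBP1100Transition checks, and a nil error from ecbp1100; they are not the real signatures.

package main

import "fmt"

// guardBefore reproduces the removed shape: a block the node considers its
// own (minerOwn) bypassed MESS arbitration entirely.
func guardBefore(minerOwn, afEnabled, messAccepts bool) bool {
	if !minerOwn && afEnabled {
		return messAccepts
	}
	return true // reorg allowed without arbitration
}

// guardAfter is the shape after PATCH 092: every candidate reorg is arbitrated
// once artificial finality is enabled and the ECBP1100 transition has passed.
func guardAfter(afEnabled, messAccepts bool) bool {
	if afEnabled {
		return messAccepts
	}
	return true
}

func main() {
	// A segment MESS would reject (messAccepts=false) but whose blocks reuse an
	// honest miner's etherbase (minerOwn=true on that miner's node):
	fmt.Println("before PATCH 092:", guardBefore(true, true, false)) // true  -> reorg permitted
	fmt.Println("after  PATCH 092:", guardAfter(true, false))        // false -> reorg rejected
}

Because the same guard appears in both writeBlockWithState and insertChain, the bypass (and its removal) covers re-proposed known blocks as well as freshly inserted ones, in line with the TestAFKnownBlock doc comment earlier in the series.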
zVh%}wB>6GL<`x!4)t(qw_nUK&LJf+nA3!u~CM&d{rQO<-?)ZjXNVO7a+3c( z-SXk}d8U3y0c!B9fG&{eeZ%TrlXuo)H|IJ(T(`;x3{FW&0f=mCZhpWMHBsa=_7z`_ zf>G3GW)IdNBhVUPWSMYLi0e7MY`Ai=Lh$=*Cf_PuHvpmSuG34pEV~0g)I1EkwI3wCC`H|lM@rd)?J@}{rUwVyfjb(Fx~8QPCH$z zp{{Ns#>g{WHwV4=#a=5zCkt+>+NMuPUJ;^%MgVcxp(1bu1418c4&wu`3ClDckt6G} z{1Bl0zLwTvmTUUuyCLix94}_uYS)q$3>hVy{3$xJ3`^*Q%^Ah)|E8d1VS(qIGrCl0 zkkx&EP9kw5Z-}P9*!Ez#Vg-ONf}$f+zaWB3y=rIJO-Cg(AOM2td~?1*mQI#V?`KVu zuFsllYils^+B{JZNRNPa2VUg2bb!eTVU|{2@#cipwG8o8V%=@M_$v=aba9|0hn1+d zwiZJF`2CHiv#)w!L@JPLPgGS^L4Vlp1j51rRJr#~@Ns{km0_`sxW|5>&kY5v{rjhTA3Py@b_^)}`Y;pCZ@3RD4^kfB2|P(lU0pJ8Y#a=!#nX#E zW5irV!KgygN^oCSa&mG)q4xd%y1czKaQEZ~i)^_}9-N4dteK$xDxhyeQ+Fep^4eEl8XSO>MD3Xi=yS_{L7 zg0Onp_4)C@4y8N@sHfNIBO)R|l+88A2pIA^4HVnLz;AABM5w8%s8qPDplgjO(twr* z`q-l+OMoauMqY61G?N`oRf!oK9DFZoSLM9a4}a&jP#M1=iL$Js05 z`4Vv@MMX=9L?@@s$)<>dt%VZf+S6;Pfc}FOF4&l8i+T60Zr#l_U{rH+k+_^S6Yo;bk^QhN9PRKZRhhc4TOMmiPD(le zd>tqNj=X`0d(&(CL*z0a-%lWVrb6v=0Et#Wh&=evfk?QyiN?@QSuOvpqs0R3Sve$J z;cgJE9M^F9i$EFwH*fl@y{c_UrKP1IaTG#oda=+G%pzL_(qjgjkS5SKz}O*tr=XzV z@}*0b9!L9I)yMl!_Z9YA*yBC7<{zvDZEnn9J01S=u!4y=Iot^K;eXORf9T8uFb;xi z(*e>>y|5b%pWf)oiaJYYt>8Vymw^XBH0EMr$1ePy zJ}B%M-dl2#+pLiDdrUyitcuMq7RBPIO65)5NYMxTp(&*RIM>)u0NgtcINy9Q29j3@ zmP)g+puZc*P;&|w+s<64#e7$Kv0E&)+f2#8Q(Q2ZW6}4X%D`{}e`{Mx?VJO(b~8== zonRuU)y=%Zl_H613~+fTv=)%Q*Y^MXpy6F7_Er4>s{D_WbBRc73;Jj_0E0LKq4zE zyWatMYrZE-g#}!uzvL|~EW0DVKcn9dnDv2ts*|brR;Jj+^8oN1C?Ts%>fNgumH`z7 zPSf{4snQ7=<9YBcnKwK_Q1}A$whM>X@5yfb6c%e>8qM?D7&yi32CNPI2XS*b?Au{5 zgDes}6ug?IL2INNmSiKNS*2%eLHZR_!OCvfIK+v2JK2nGYY4{7=dmvGK;Pj>=*S4>Y`cYpgaD$b z85mTa3JD8SQBn#C30*~0?pB{1sq?|v9QLKt)Y3l4df}8{YMN@Z$OO;;@N{Ur&VB~_ zBf(=^-`Tl%ldlDUzGEO@LrOgj%otHo(S6$im;(qVUt%hd6lz9UR*!RB>FDSH@FZW% zcL8z$j4*+yOan2IUel7;ZJnW={v4=MVPRoh9OFKJ-WkYd06~zc`aa6}zzqWA$XBZf z*auz+e*OCOFZS{+$LG)E^dD+zh07<}e>x>DF7DJq6T5g`>pJiTU@KZSHkmm&E0D~* zj`u9qIQkX$@9QN2;RFqQ!@A2~=7oz(&ZCNL;9sxLF^D|=>Ps%OYYf?P1+y~@By*t9 zYGQhtv%*Nx;E;?mGtuDJxzr?7V3(v4d;?+(1X>I&yXN-8)*8)ivpW_HB}z zePA-Afcsz&2!z-f;8EwsChE}uRTeqep=FL183EuwZUVi9DT{gkejoUfGL3ekJbrm3 z7tzajkL>9lly$rB`j*RRAVFdZ<*Lgb|$p0Xytstw~ zLL5Pk;W>W#Gqz<7K*V{rZEU_fgXGoMCKgcEPkR7?fwNZuy#cN-p-0{NGs`kgKQ7`N zs3(e*dm7jC59Hf@{rpf!B#>emA)$OAxE)6NX*5Q9NB1Rf+=vQstiM9dtCKRK>xM)I z0kekn&e)g*&kG40TxhSrK{u2$TICw zephU~L5c=98Wsw~=haJ>ypQYafN%s3;t@(}gWmv7a7=Xc^u)xi3Vj}Nas5;+xRI9O zO%RSAM-)d3xzzD$7Lv|IxX2m@;SGt0#>o9I8XAj(FA?oULck77OG{x2fVS9;eSHO- zFW+k*JSr}35RRT|jsa8PBJ$CL$C9z28SwG(nWT_mGp-iq&s0*isKvez{+&-Yl5j?9 z8^@Z7WyY0OoAOy*%Xlc2###|A}?HBf* zz63Nlf%FJpUFb+9Jxa-M`K(_s4f*A=OmJY}{fus%7|D)n-QYDG10chd_YMv~1jP@d zl_`8*UewKg2n%fpc#!<9Tk%$C9l%!K9N=&Ca@vKZ4WP{!WFN;?A#fpBEzNE06-^Z+X0Q%` zLn;|T&4k?->FTP)wZFd)xG;BT4q#_|8ZGWP4WsV4JN6pj7RXfpfWX5B#Cqx(;r=t) znR@L50|N-m(pAC{2*?1i)P^^6_!#*_B_#T{jUNk$pWm(rO;8P|1s4mn z$27>+__)N07afp5VHGME`1<E+h!$toXj57=G1b`7X~TFlr5Jr)Xz(Zjb-L47hw zI3Xv3MQ4?4i!Jmna0?4}e#toXcOz+)EFrQIjH=KXR|k7FLSkUa{Z$^4N2kZY7g14D zyJ#nMt4WQFn0Re=9M!va z>7ZU;`}y^UP{QKB22Tvob&g4*5Y`vlO92L;E)jK6 z3<@%@A+>qI{A~8?T8I$V2|7h}JPmE2mOoLwd~orVJjn7Oh*ijiiigT+HT>fS_*X*^ zc8jIbx;cpW>sB2Aj}0H}Xo453ZZ>v+W#)8yw;eXPU5k`LAE! 
z1;-R{Y5`eYid?&dagoNAVZeX`IQ%srY5wyaaDxM!e`6$TXJ@z1kBI<(o`#O@3fgt@ z-2~=m4-nQa>mp&vBLA5^3$*Jez>iygWOMyRyx5I#+5r({2*8Qb&L@Zhs5#tnFNgDk zc+Tja2lUp-&{=}qmV(&_)d|j)B2k{`P#%P7`^J^bPXj6;80oiYi$o1>0Fj~>3p%K+ z3SnWV!AhKBta*6`m#=`G(2Lk8dR?nkBCCUJE+HhaI_WI`{j+AW^WrZ#NoNc5tLVh( zpFeG3sPtakz*~KHWr=k3)W_Lb+W}m?$$js+!#E7lT!{i!YJ&{Z+XeX#Di~n*Tr>S1 z1~YyHOMROYBBumy1A>4~$n-nZ-_-49s2CXsA?Vd*_Dt6&n{crxyM1frs!rMSsTs7} zGbkqj(@*^MH>j{Sme4t{33e-rV8rk9L_wI3l)reYqtgS5ABt@tfN%*LV!EAv?r020 z4Jj$LApfz!aI(m_Ll*F61>fV9pC4{8fWU!vO%aWGT)6^BJARZmOYYR1O>!loX>;r~ONTJ)w-BZZ^A>su4+w84 zFIWRp0ww@%9?tIuA7E)1a{x39#wJrE@vApJDabIHe+q6;1*4>*B7OJ}D$NF2P%#ko zKtb*So25Na5y*5O)KZ|HR{%E%icT6cV5F@41M~;bD*`gwTyR|A6KTj3<4uP;>L4tw zi$Eg2Dtg+>bKwn43GO4f?B>?i5TH9`mOjXefVAK}3rS0RfWc*5gaXTb{9fhkxzvjS z2%D{dKV`jv>~1j2V8s3W{94h{6)>zIA(A({fyaSMVz{5&SbWd`)E1HjkbsSDU88ol z4@!^(1Xx)=!o&jJU9=lfT-jHs9y@_@bOTFR%sm;V@eukO^AYMYbGGpw>jV%$P1~?hxb<4J=Ihap?azIqN#T7p^It;#M z@tmES0tgVYZ2Jg2MTVb0SR_;dN+uqV0HE@_2BZjb&@9wo((iNaH#UNS1JQf8zXghs zU{*Yq3Iu?d4Se47L)<9L@X*lEHuFooRodEJ#*dZ$p`oE6A#u;PA#vn!Mbp5*z|=Iw z$^BB*wab@p#f86p`*>@m{jL?k5to{&si~S;2*#!1+qdn7UV*DuuMUj^uEI@isx`#M z#zH!Sfr$`NuZ(f~OV9Q^FAiD8PMKY&hlOT)oqh?U;_R27k+ds@7=)Eu&)^U!79wnL z;jWv_&a25uQ-{gX(MRLI{7}G+pp_)$J`!yQ89$suWwC{wjZF@9V0NXCpQFO4)Yo=> zZubzv7%q9??(}(_fvPOh zBsw%WILcPG(LqyFvsk!9bch7I0Za(Qj&qzkp-1MTX8KKh=YE|KMsXo1d2ha+b*1X4 zHjpp}?$z|PwIocT*_kUpDmGT%7;AzBRy{p46B8f*sIcVBP5r;u#PKfN+&{2x@D0hv z^#`ms3(zchKb!(P)*b5M^=S&p?O5yzu7#yrL0S`6G`nu~9B#VfnSa4-=y|`_ud#9B zo%5+UuN4@Mm|rJH&HPUH4z`XFh-=Dg-s7&G5hp8h3D(sgp8vJQ&(x9u$_v z%XpOIc)gc64;BEa*s7CrZ4L@;H|T?fy<)C31L6WGm>o!l;H@HSkH+w7{=y*a1Zm-Y zRZc3*WWE@Vw8C1Ic|J*WH1;_^2Pgp*r|Pd_Tf>_-Z-Unf3m2Sgb-V9`SI6e&AidPa zfVl(?F4RFStVGj{VHq<)5(b|E3J3!6I1d=y85bb1YQx6WnTau9FCs-3^KYhV;aE^| zs0_WNGhdw`43-f95!|$L#iKi2DN0lrL^W`Hn8viUv=gAKFke6_dHV-Jf^Gnn0@Lg9 zTu178&*i+s*@2Q3xU(EmN#4q%So0SnczcrmXk%izhAaey`)s2>>ez+*q2we!To z`@RpS0N_<2THDxQ5VJSF#yyHvGOz_-8&?AHuNDf6kB@)wS)_z$@S-HdV`sU0O9vEd1Z2$bZIp@ZwUUKI5e~@2|-OvAisj)98L-_k8w+C&) zp93OgXif||fZ+jAv)-yG7)E2O?Z=6vPtRJP;4SUES6>(3Skr3;Eu_ZUJ zM#w0VF9c=~HZQzoCTQv#rB)}OE+2KM-21KrVZgOJ!*I)K)KK$9{CpKcmc^ZukW{Ppw<&;)_}+{MskVx>IryKH2&}m zBQXpemA<&;TFY&t5y}>NRab$+gQv=LN|1@thB0O8B%L#1Wr8wc60`rf8_r4uCcew+FbH)=&=^;3$bQF3jY)mf zgk!M6MvtN0y*RNvD4_x;9w@K~-KDG(gx)9nRRrNkwlJBAaKoY3HsXa;gnW;r{_!j+ z&qIvUxzgvys$=tXN^T^&M!&FzzqaN~-IT8?zPd;0JyTlwS&n~tRJ`J{e@1)R{#Swl zj#@W9AMLJD>igt6lcUs~e%}%cGPmGS%D)Uxd%W_zRYro)mvoX*!X4*(%i>f+s{O3f z)Gm016Hr{J_B<-rO2^qI@Cq2(xBO72KqQM@QK*YqZ_!`BALt|B^Qo1LBUtQEkkjO8 zF*%0L3#eM%y?eL9{`mJXc)_0NrHuig{GC|_vfQZQ$IEa65N){ zfPLcXNr(&Ah=&{w20E&E4wV&FV ziBw2VHz@kjY(e_xi)S6Dw*Kww)VTgXD>Vpv>h~eaEdOczOeLRi{Qc(N9k2fj0d>2K zzu*113GG+EUux_}rVjn>O?*2y_M897Un#Hlj5ve@l#E_zs83F>7|VJ8(&n^FTJm1x zuOIeaEGFCvy@Od|Xg2Vm)7Sj$VhgVWE2@Ir?Ym;0{r7c6@$h!ASyok0CU@6AuNxj2 zDaeS;tCYa|p|!hGgA}f;NB-UQ`(sDMcFa9I7swi?Jfm0lEAahx6gPG-EH)1kmgzi3 zcBI%+T8Ezf$b3__b?%tLe>B#hnX5SVfpndqH>+NY(FJPz>vunVX<5sBktQ4;K2J?O zvytFxE2JD@e93qB8@^l0Zl~@_mrPP`N|R^#M@i{E#(1ZIkbtEtHqy;PQY*Kmo9*4% z<{R>#6-oN&H#sfY!_6Y3yg9p^Ka_Q|4>@Bo|!e@vZH+WL_9{5Bj z3Tw`Zsi(f_Ie3>wq#RtFB$@m(i$+`7k0MEQmA+^ySV1o9hbYJ0Hr*cNI?p*?%cAAV z=xZf+PwbDyE78Ro8D^DI+D^*UMKn1b?}-|Tz;{LVtzMJBXm2H=IoQ}ebLlq z!%AG;_%WC|XU? 
z0Yj)8%@j%)c4UZ1oRaazJn4Ca+*VJ5BAM3Xg}`e_wX?S*UVkM}Pc^0OCB&AYxV&eQ zSoL#0A8v&*v&Z0vP7oWF?tk4e_HOpx;g)7Thkl9@q#?kv3kH$;=#t3sJYTg3M5cP( z+5srX2bioH;$y+U-m1Aec|P8Z8!6^A-lo)8t@fx%a0Zn zyvF-)hCK7%mmcdouc7Pc1jq+;3x!2SnhM2MR#t-27yMbu zfr}-Ff=X7G{|XB;oW~IdvZ_8C;bOnv!9p&NGT>q-sSWN&f&D+I@W_p~SLXK1)ZKtP?~uINY*Z zUH75FC=Z1Ls4Xl(Z$UKj^1sL1!|~wUu0Z1rvAn;%EiQ;ZqgOE5<8YSCGAClAW51OsOd%hH`sR~!o9J5X*=P>F=GD_<;&SQN zV^Z_3@SIa88q0q`9Z+F*zS5vYCeuPMdR1!7bzz@RW+1p*X(oq;jMJ90nM4Z9?Z{(E zTjZMbvR7}KvsFap`;)=vKQ}~$U~S@Sz9WiBR=&@TFHe+^o?#rDr6(XG`2H+MvtXgL z_eX8Pe6!*$M)k86^TE^D?}GSUfeG9>XyKBFkJClzkq=&OcOVo)eM&VrHu^BhW7pU)$gr^LPD4zGW;8JX0 zkQ!iBT4-PuXOOb+-pobNv!{8-2q->THTBkJtc@VodORP$RQrZeN{xajkEZexVN)M! z1|>*XE9`LEL>Ad$OmhwS{B^4q8NvaV^vBx*Zdo)DV7J@!Gix0+$#vvw>tKbe_1#Jv zS`%kWfu?v~olGR?la_^B@)W)#)I5Wgt_8b|)8;P0*9bd8DdfBS-V|M~`0EMtrV!a> zqUe{|B9fOz@z_!f+)Z*!dxljzq)nC&&|JPisFq;c~2bs_}VYdOsu9z(s;8oZll^zP9c2Uw(t1Pq^6Xc~hN>N;x;Xf6bqyx(=EU z-Csd@$&+=U)~LjFCQ*(~{}8zeQcZzqYB!AcSeblRAWQuHVbRSN-L+WM+7V9_A=5Gp={(LkQGPE(!?5|7` zYF@4|eV6ABzER@o1qGs})-fjod^Yyjrj(p@#4{NQ{#&#@n#TG`+JaK*L7xT}xW~Kb z?Nnlu@--S%EMm{(bap&tKt`KIlK4#!Tl|n-kyXn|w@z1|5gzP&6TSD;V+b^&(~Aa! zpdakqQOi}nhLwg~B-*0C(JWYRNl7aosq9DNU+d}N9J0sZ zh|x5rRl%^+x)!vI749vV`IsKKk{TsQHW*J*^Cf?0kx~kKzEF@aF4}y-=cO(;Eoz>$ zk=S}+Z$^eO?}v3ze64j6=@H z!`T?vKismYV$S^jdj&8`G_NLh8uPV`_AyJ`dqu*0K3>e};z?oz#duc~8He%Y_qR~J zOC7@PTY^7-chp8Z{}Fq5HDkMnjVxY2fBfL7{r3BFyvY0iLowq<{zDcsS4LVl4#9t% zo%r1g;Ogr7m;B-3A=Le}>G{w8{yK79KNK6RFacFeIToBxzBb+f9W4|>&?W%QlhF$Q zh5DXtgipj8Xp;J|HFDyvF*DL(>kw_-S-augZ$iSnOM6R zkj93@#eXjS@$l>3(w~R;f1tsRPDif#VP8`f=#q)qPB&VIXFM<-B4X^?;i#x^kk0kQQtDki-}WHHc{nZ;-OiaQDm)(Bzc3S)Jk!mif>EOQUI%CB29~Uq zW3KP_PSsl+(X;iWZhiiSb@XHZc6Pxhn76%O;>)xS479r}&p$#oJk6 z9^{_)r0HtC@ppoi$6;|~%;yxUM$4`$PNRZ36rXrh8JDsm&wEd*Vy-Ky;bY5K#p$Hd z+ypCX3(lrHZ^86Das32i1; zMX~inKd+aU7R`Gbvn$6KL8@id6Zqz#(V!fUOD!*YztZ@E$ma$sn|2eH6d{Jn@wwXd z`DO)j9Zw%!b2mXoiTI4Ws}COG==XI%(-E$21be@r%ST;$s|z>| z)Kb72d^jrfXQ_N70x|@KIy5X*g@$5*YC_WxIn+vlm%K$9?quS$S$-O&%TKLPpsPLfklAl=A+$drYaHipy-}>g%4zqDV|KBP6{zvapo-N4&+R)ZUtc zvHnVpmZ=3(Pt&CXDmHy+gS@rmW{XrSrwlIlVL#~Fm6>vspedrhYT13q_}=1c?%9xN zsZ&wbj#jxX*35B`3;0h2`+H_&!g@YTOsQhtoB2=m zb4@?ST-gaHi$zK2^i1{$lWAVi7Qk!=6m@NQ@8u~(c{M2a=pm$^GLcaQxp zzr|G=ay-_nZ#sOad64VUTAFTBANaWkeEOz4=SIb8*#9gfK4uA0Dk(Ir_}yCGkXva! 
zs1Q**$%^0EMVC>6m~CBG==PmE&?Ebn0=I7mYCa-%!-ud@1`6S2IG;tIuOyP;A2&nK z;V0ei!1}lt!D7?<<-r0JH@MW%WarO=3scNW1fgbF_T22+xz;~W{zJTHm=u;IFm)R+ zXXoOl{9x-K4UKNT?VmiwEwEli4vNbYEMBHab5qkGCduO42Kc`(q){~)j1>=A0Im*h z6fqlYJUmZE2KAs%g_}@4NNxX*OfeeXt(1R7l7rbcu(uavn}l*XJPR~($3!1Pv#&z3 zCWsLQ$$!O#scUb7?icNV^R09EGteEVlM4Spxr1Xs>i3oZelB->Y!C9`y_D-Z6plJO zI-r6O5U@8Hp$`2`(2TT2X~*{a*qR9S#Jnzy|&BI~ga`w(3m#-Z}5 z?|tsyA-@YJ=aDOEF?O{UJ%nSNxig1(BB5NH94wdg}87LJEhM;;= z${-095Bh}V+H#eFI+@Rl5mYx9?q>|K^ISI3aO3RizGm<;OzcsLMpLEG3x}f0Ql5@I zhM-FMT0obHvudo?9G3^sjM6zps_hKOs5fSGUmZ>hP(-C7+`G45>e-E-t<@5sNTO+8 z#LDI%SWy#oLWEnKwTuE8D8i*{#J+6GugZpvCvWI%h2vXY=evbVtI;R~>o@uCje5)H z*=P=BFt*p|sh`^2n(Se~*O9xhJA(ZsTJRT^kCxH+fi**`!)*3k`liAJ?Vd2}`3pa&v@vtgXNza1U_ zqt#nx(5wBz2S77hip?%+TYP<{7mN5qYqE zD`06Ym~>llEsg58muKb3(_P+qWv4*5oXTj4l8t>N{KU&vF z1eLqaP$@LvGuAfuP}+NDe1Dz3vhfDaP@mduyK-{02eu1rW`Wk@%ki|ZRc}-%ZU=U7 z#zX3SXa0vNuOi>FBoGo1FoWL#VABx*#VyA%@5W#TE)R+4lZrI0f2`QiweeZifOqfS zK>?GKiwmGK19-4A??@GOmuy@lY){u;sC6hu!ocx3rH*(yYFbRO0U>ZM+!k|WETz1fuCt^1BOK%k6Mv9+t2pf0tx zdBEOG7MU-{qq@4dX`D^UtWLBsNr0X9Dg8vq+b|lXK<IH;-yurSr?a>#JooWfK+D{0R zJ)`AjwXiP9cINoO!x!~KUmmNOkRA}=>E2xZsLhHTr6i(^Bsoj2jPJekD`LbKtS?+G zcN@H`#)byyVuoE1Prn7xLCu7JZTe5Th$ICs3flfnfoei^F-85%nKMQnTiwuP0eioo zoiNJgzuL!H{bieFCU(4M&Ix>|hLJyR;or~idb-0-QX!}#fR|`)W)`dZ2}*D{`%KKf zW@Mzg-jL{brdXYfkgdi*yAG~@rY+H@$u#M{b#L|&=qVwx`}r;Z;y4ZCQB^-UX{`ye z3Gd$3e{pRsJaUaZ$P7;K-F$iNeQq{w}6uW z_>x>+UZv2L5XvHZ+-H%{y|Cq^3^QJ0JD~GFqyhKj@aq7uRq8`@G;FbHgUe>H(U0+85>?h1OebisIU5ui`ckNkUI_B07)Mjg#xpAtT_gUacT%8mkJAoKmy@y) zI-66*L<>!AL+9ULvMOMsCsKPDGt=do_%FT(iod2 zNcOh)?k9BfBkKIE@j>MB*o6-$v}lfYMrz>J-tc#!6yF7otOvhxEc|P7gRaxMZjtLo zx^`cQ`#i&*AyI-bCd$jEk@f$17@zGWb|=#%H?iT$^tr|h`NOfS;$&#s0~QWs>)GVo zEj5CsSX5)Jy$DiB?zz>fSnNG3hLo>8vZ&QJ8H5E!cEk4GLs&xvRBjTX`hIO7trQ5o zdA3JwKH{GRl*x78{Oee#thech*A@_KT_O0VR%*7eDqxl%Amp@b|Aq+$6#^ zs>06bKr6;)D+e2;x9g%9=5(lprB)hu(U);f-GkUDHzjxE(a`C-=GID2q}SIEvbplL z_79h5&e^TLzIhhgWgwH+k~!a5x!kb-kkyx)E>%hJFM0Rz_pYD1l5!OA8C%*vO@BDq zvY0&+VfZcUz1Ig_UK7ldBJn8ep*hnc);#Irt^79+vH4_JNlmN}qme%W;KXILjM75nuA`Y(=mS>4!l>FWMxAA z{dZ@RLrQv&SU zf=aKmLKAKYp#}-me9!#eJQXnJt!~z z7nU1m=!2)Bq74=ve5Rx>IC{&~_UVXR=JxR9} z6rK$f--90u`CkKIV&mt}pRmD<0{7Dq;P5Sg)t3i22lxv=IpX<$0crREv(YLE`xYQo z#f#c8$0OIq??88Dxb1&055x+#p}|h8ts$3@VAu}~8{52DE7!g;K*JfZ&#P+}|E=8Y z#g%)x5!W0I{LZ=`r6Nv_|2!_9GUfkU8NA@w)o_eWdwzn9Kx^av4B@t&&jlq1dw`RlVby>l5S`J%s6+K7HO+B@y zj_qHqCyy}`dmEezce*;nSEEIQecJ}Muc=mTH%pBY$6HkGzosHJ6v)QP_sR}lx|&WS z8mA}VYBDx2j)vBk0RpAW8T<{M+-$E>0}?~K3f8>g!K(u(2f4V96h5_j^Z4>-XXYDN z`*H+6u|~ZeEfZ7(VugRUNd(zS*c33@v|}8qx%$Hz*HzI(nEtsoqB8GuEw4Q8|6^@? zw771O==jk-f(hdDX!kJ2v*f_H5((V~$>Vz_Helleu9>2-5pr!YR3|0kLncb$S7P8L zzkdECFeGRu;k%+=LZI2hZhIcO(LovCiEB7V3!kTNs!@7xo)D2ji(zre(NAyT?dJcc>Dd4NyQ0ponYotj+@bPY=@ar5w56JqP0J&^HX}(sawreSLv{5}F~HULufKS=#mIpOy0+qf7m_HmR~G&VNI z{lFA#{qVN^cXbP_dw3zBM1yOJhwcA`St^Q(V$ezsg9t|nXVjGAe?H93I+*DifN9`qr$wo)U@(? 
zkMIrbD8-Vqz0;<;on$)WL5DX@7VzptbE`F7e6D>v-87P{U3-<%Df64u2_quo23A(T z34b!-OorpemlO>ll4N6{`9a<(uLqCXC#OC|7q=N-h~#_BZy~lSrI}6(URn-9P7kVy z=w8e9UIt`pxS9TwY#M*%CZcE|w=J-t^SeHX$$i}NcIAA3xb)WucNUko>%G{8U=gzp z6m_%Ebg)@SG6tfT0Fm{C&Hzi1ddUd0MA*CFTGap0?mBT$Dw@W>$r5d8*I>=uQ$VK4 zP20yRtt=f6b~WEYE}E*#sZN?K984=NDXw3n3eM+qI;Y9Q>rfUx>_*s0f+npMF-4); z2!f`jQM!#M#+U}ENitPdvi6aqosM;&5*h9TzS*sORtxV)@ECMz!+HYc@~hb z@@6ytd{G;*QfunQ4qXWEnSaV*?rFp?*2bN~8P#fCl5|o~5Qu#8Z^yx1Xtg3@7N9Um z^tL$f$Lqu8ILoKHDmGln|B(70sU4XmO751bE0pr-lk;2$5Mg{m!h_vi?kHvS4ERUS zV5cvjA9&TO&|vlM+t_a*$onbwPt-&@>{ZFa4-ZJeeoz2kD5ZyjrOgX{yzpaIySs(Z zYzQS6T!%Tv`Crg%!leh|nw>vgL>lP4))?G=lZL)OMf$(jGbkoDpyQCnLgf$J|B>U~ zn*eKh5q?HvTlr*fCV|rGKPCMLJ9l(+w8(GgsILQz_Hb+JKfHP`+|C`gzK_WH5M+S| z=Ik#6E+M7Ouag3o5r=2mJNvt`Ge3j+bp}dzj5`gb@(g39S3qE`wM=Itmi$ zpdg?K1hCMnQlx`Y44{HYjWiWes`M&FX$sPdAOuk9#Y8}wG^s&aOrk)jcW1=;=3D=# zd+)jrch)>GV+<+h?6c3W?ZR_M99DEgwI1x*#r})sn_9ciDMJMk#-dywJ^zD}5*G`-r4}+RxLUzvF{j8hYx340y#+vt<5{vmykn~S&?G&8ijW+}JM4Y(J zpZiX5W&w`Px^A_nP+R7ECI||I4ex`70hR3B1da%K$k|Z;w0)nsj*oZWq5vG{3j&U< zx~=6cL}Uv{l~uynnSvA=sAAk{^Hs~OUzVe5Bg^-lK5l3=+)**<9pY4p>y0Pqh(Hr1uj7T>>E}AHwx3*5_R6o}=bI1` z6w(X0ZsO$=7!YXplw@$?Hhp9dXI^~RB1)1jN1#DkqV@LS+lITN0u?px+*?mLeQD<+ zw%MlRwf&e`l12;06Az5@$DqRHyw)+ zWNYt0;rL}mZN^h(j*X(9C-cv)UaXHn%BF2>$~21c;xq5^?Mb}W>s@tj2pzy5%(KTh z?sHj3eH2kywozkJXykRb@O-b;# zkag^dY)uJu(>8saZW(HRLGkk2`JPH^Z&IgUI9il_WRbI>^^NA)tnbnfuVZ}YXX~Q} zPle5$J7|7MvARG`J*amwi9CHn<2gq!L#Ja}TFg*}Ku5`&@5qz%`E1MX$o{A#y5gWT zKUp35@O1K$bfb#Avyk8kLPJ}sVL}mYi>xS*0$xmJf364lSvq;@v~CYmA(EnhhW3;L zI(T7bS!uSkQ5eT25=-Df70sTqNR#uw@%?AK3Q;||uy_1dAL7@*xB;;%k+MdnW2zM2 zRv+&e89chZ>>n)p`i;!(6Wo0$cwR?Q{PklM-7ueJPsy7Bmucx2t{-nz`E>CmclizF zyvxdSv&|BB%G7R}v7SY0Dt<~Yf7pk1bPasn_GL%6&4s_h)7j%#{X&@8Y!A0TDSC)l zU~@AU5Yjs@(egT(dZH!)A5R_wbV1ooX3!;>*VUNiC_?_ z@eFNv!JDJU8(IvIg@|{f7cCpxh2SC5&CLT7lfz0RA%|;3v&I=TaXs}z z@TpWhn}{W`iZ4#cB3)HF#PF?z(KJ!C)-y}m-*)q<@d{gqVCuJIrYrDVS;2<$Y;+p@ zaoQp&Ohz)pjdPT)oaZ&h$ndPv7vsiVj3xSw^<6<(-KA*(=4lmWWncRG;uqA11}|he zU#~1CbCoBCKg;NhS>d|cKfuDrZZsL)BVDgOKg6EMf1GbthTWRnyw7&AJ^uINdEn3> zw_FQ3l~fN=6Bax^Ly|RhW(T*|)BTSV%Xpxe*6<2jY&3hG96CNbAF7-IW&snparGCk zOo?uvaY42ga6V8&gH$-tJP>+>ECUr<__0L7?$PZhyOP{7=hDh*@!k-h19ezy zO5bhe$5?Q>zW0;u$H^)X2v23_i|T7BpPU@Q9t81<{Qx#nQ{vWp?l->LUiVj+Fm+tm zk~|lzGzuin5R_pI0Y~@n28QMa1MjwvJN*NM(KPU!SG;^V2G><;@?xnAT+Fnzw8Fx| zn3DfoxeR`6ZcyOf<_u=uMR*sBZ;01S_u{v6WUrjU6yo?b6MlTYe3cMemaMyABb@zN+BQM~SU9c~oa>{ru%~w@b-97K!V_-tZVq zAO?+z6Synk1yOw@#D=5c=f?wsb+!qu7eZnRerKuq0p)K=pRB1~j_FUrqbu9L(lHCCOI zvAWjKdQ0<$G|9Zas<}_H$s8>xSYZ0{C-HY>aUY3^%8ZNDf%3z>h~>!UQBL|ekD=LH zV;Pu&Prhn0a^&TXL|iiOC(k*>%|ZXiIXr&9j74unaq)WD=Ce_ zjcgrGBcp7g!XhYQ37J>gpCZCZEiFbe8r7~VANy^Es zYu8lj2Z#N|dMRgo4SmyQo;{n)xYc7&{P~cFZ$(&!M@vISaMX&9u(kTFp4j3mrTE(( z!}nb^zHr9bZ{=!q6cyW>HBWOZ?xVlMUYny-VBNsy$Xf2-)#PzA!Lk#bYP9Y>xzMzN zlPxa6Gx3#-1R~M+fJyu@Wt>zM39ZsexyO<-8*G^Boy!`)T#jsX|G7pzV`K4jac#?C z5=Ot63vsC*id6LUSk|E!fy1J|irpSj1(%cmnQI5}$O!aY2H znzyc{Wg7l3K#W*v1EN@#z)3!>#-B22mUe(UTPH0Qj{-Mk=fbv^VMo6)+A_@#FFQ(s zJTgx2A0GhhF@yLa-L|yNpRX1c7O1&i61@QV!WBDl2r{020?ZG_A~1~|S=)N^jZ(J> z=1zM#FYfJ{uwgVC&5)m;UkK4%@@w;@&75959?YwKSf{G**d83Ni^Q-dJL%Ow3jC_5lJE2M9W=tdClJ9J=7%fp zDw)`@bk7K^MpnHcY2*X?SFB4^CAvY(0<|l+Wuk6q>3m{WMrEx znXrCw_()P4+sW=Ik;M2QmYzhNPj$kD2ejJTo{PW8nzFu+*AjF#-*XSIcc$;*Y{%fT zyzR3zH|RJNQ=>m7Mg%Q{Qamxv*^{l96B;~+Oq$9hMxalo)$wiOi^V-_98d8exJDDc zs!mlykNg*R&oQ(I@yjGf$~EMx!--qmu6mgTo9D@`*jFrnx4I-_%#aev6jMN9xr)(` zoFC6(YSkeQIhFPK(bNXV|7!Y`=_wx)BjeAck0eMKMK+Q}bZf`6g1;m#%ljMTddlOj zbD|QUNEm01<2(bHab&t|M$_#^IL`HZo(=JSt!vV?I>Em;b?O#(=Emkacm(fH#Tq`7 zTQWkdPH=6(GSr4N?&T~JhZ82!TQdH_iR(!>@0s?wA%YbVwFUDoAw_o;4|r^5%(YMN 
z>niF$ z)YY}DqCzT*8uPSS41w5{aQT~D0`>VV2M3@rET$>j7xsBN-8uy>Fh_yn7P^2mxS5%K zuU@PY#u+sq_ZT|tw^E;-o!!*bBsJb04_R&>8SIT!UdI9V26mam6ipss2@&lIoY*KjVbM)hNlH1uK?8nx|3h+ zmYerReU|UszAZ^-4T2+T?`myZ6Kj#+|sK_Q`jNdgDj>Y>L}Cu|Aok)jQ8iy5x|% zVQ03_zeA#sL)U;6Z)413V^}-aIe*a5LR~GlEmQ9HPIO{_qC}yFfpv1EeXC+kj3dce z&RMgHq-oG1k+z}yJe#LCQ7W{fejLh0;)e2p#Qw$Hi?u{qGU7~yBUw9gi1LNwj?JRl zBzioI8R2|eB6tiLiNi>w8JRtKWUMsXhGJ<=F^qC5tM=q9ltRIY&&cwj;N%sfNV=Td zAgh={&z)h#KgTH|x+R{eN^OR4dHBVX=Dbf>Ez*oqVC6a$BJDqQI#srCQr>^+R4QZC zJ?6q02_r)yL!*E>aY!_?)H5LM1BtOv6D78MqqW@i8j0>n)=_E?;9KlEEeKbYy-YaE z$aQ=v?mF+Ozbg!{kxWn^?;-DaY8ob39^sVvh0(7CNLRG-+aqxwjUk9hf@qHT8oz*A zhl_KAY_cAv#5)W@EHB`qY}1sn#Ywi1#0>k56x;TfWIs6@v|gu;khKjMtUE@AzF-*R zbh$gh=JrB=UE`%s_7i>(s^sh7t-SShp|K&$tGkl>`7-ubsCH^`nN<2D8k=}To|J5y z{s41S*vsMuVwod8#lu3?7ki__GBkMVxDvkIJTq6=0!_-XVl??eVjS!wgnU7T7^X@X z35`l&ulJZ@#Qu^KAaR#A}jIu?X7Bn zSXlK2z1;ojL=@7c*k~TMB8Rw*Y8Kz0yMj{JqJqLgSvR$~C|#{x8zxjKOl?B9{RvW? z+F}G_(?$y(7F+LsrtWxvVQ5oYUoI)bE2mzLxn=IsG6!TrY5o<+j5}wd@1=+ z<%XY@s#+d-f7bPyHKTzEKiR~1p6H403K?<^u~jC9 z-}lO)6Y|u7;|nn*0bg5Btv7A z?kT_-hut*8z>to_<+P)n5YX~U>-kOACeTXE&dnXHH-d{I_`<-!zXCZjr&;NA(UVnw z__!DBKSKSeGbNznJ%^hMc&WyTo?@)Y;*&o~XPx=4+SnF7Tb2G0Hd7~S>U7cR*LX(RE@Dc8pR|I~- z@8Mrw$8D|a?B)OGYlJHwHJ2Xlvy;sVn?!&$7v{ZzFwE$*``5(~xmn9jPbKuIVqyR+ zHzD0iRyN=W-0C=`9L@Ir>$d9WCx7?bdq~)6sG$ZDQ-y?(izIyZ%yxAZ8sAtWmttjZ z!QhN=xupbKW2lsn5Yho3s*dy&qgJ<&{N+EDIBl7L169f<>_TwWyFzv!WSS5m_t710 zoIo1-{HxT%-By}@;q9O!1e_Il;o$81ICfX*<{!1F`8|s?SmdWHYmv+$(q7XYiDbCv z8aP;`T${bM`E`?%UM=d9dVn*cBvaM#yr#XN<_*P1d(@iP=F~=F;@VYXz&Tui4sU;X+V#24Nu9Yy+^mm5;`(8)JVEKhleP1H6SBiG8_0mknnQWfqE()?zuP2Zc{7o8 zh@TJ|G5Yvy^ADqbKZ~mgwymM-vqY!w56|J&8w{Gy$R(V&O8#ri+8Zk(J1y})@WHo# z_Q#$M0y9zE}G;6z-df<>?DkimxI-sQ}N zb<8<9=EC53ZNeH3#H*B(;mCWi((+H&!Qou(^GBE@@Msx72;sG7Adm&Jy{bGmSDFZq`cIHcD_oFYfmpe4*CMd;4WDaoM(m_^B$X6)b6wUnD z-OQr+<*e`cfU!99Ot9d?0?bF&sSb}rgXLpL8@#N-VpK|nLi<=se9ZY0b#a2`WmTp2 zHi_}jOwG`^NEUO^%r#6@hu-_m?mgI-Zxs<8U1aRwB!<2Kqj{`voTMuxJu;(TI~Ud- z>48gIYw-w6tMkmsFti!*i!2Fwl^Gma?P*SN44%w*ovK8_;!j)bL&mF;5&J zlzRT8dKW9rcCzI`k~l0q7rccGnMt%y*C^%h|1M?JKw;;|dfL)csa}h%{S?@caD0@y z#*fs7_9%znIc7e_pP$?m6?*SdV)O6*@<9o?R=zMNhuVsXLvm2PSaQmpJ9lF1{)Ae* z-Z#>RKP>n2Gn~+Vr06t&4rOIQTVwelXuB-x(-Y>%Mk4MIvx8cH>kV}%Ht7$GQXAlYmg3kV%jnkT^2x}+YQ`124|cnAc4`K?cIrFo7ArFKFRminh{ui1+#8(%*! 
zYisKQ>NpDYm#ZqVe(Lsg_GRq?^nWpAm+jI#nO+&X-&m}ed;B7&E8$FLj#!f#J&%aJ zy*u2nxz{=q0-utm0-E%3#1wiudjZX{1~T4!!ejar143AI9H~f;5I>b86dEX^aWCKu zf;%X45?S0WnU(vMBkl^*;z&mH>D@BcHj%tp`U^xiw!^N^3Lgt3%RkbOnmB+^Uiy)P zlG1vaJLESa7H|`PP|u-psybDb#!7@(H+k`j_;hsAjhhy_KqGa?7UyAj8KTuDZO2nu zI#4w~$}lnNO&p!E9u|ST3{K1$Wf-#E(gBZ=83akZnicCUk|QWjGFBp;wwU!~P7p0p z9cV5M9Ovb_hfhIJ1T9P=*BJ}>m4x1UayHIfBqm#VPWmtl&Rt--zfZW<^HJcc78Ogs z$5JSXD)L6RvI)QN%sTJ?ZqrL-iSAnVIFl*QZ+?im)OAtv&mMZ!{0p6`kiG=)^*d4@ z{!N(M0h3W|e=8e{_|-G9IoXoc#~+{J%M!618JkR@Cp|BkX5|c7!wJ)tiX=u_%59#K z`#hbPRylh^+@gG(e_3dTr-6nuLG9jRiah= zF()?CiT=*7M09tAazemxcohk{c`R1tY<5m2yG>FTU1`Kx=R8r?Mlzy9qaGn_gHNG6 zX3yK{_MxH3nf}l{M3>vk*Jfij#$61IpA%qd0;vh;%%!DNclzTToELL*G%iQb6u;mZ z;QJaq@pOMeSo(D9{$d+5l5{4Fh?(S()S(ycxNa;Jv z%3|*mQ}~s0qvCXGe&k_psNci~@YJi$|9sA3lYk~)#0Jt`7RTxbV956!p1jP%*atnB zmh+Y5`^dc(b}rM(PvRjj1Hlc?G5pKUVSSx4PfS5bqj3mZoC-^>Ih}{Ro6*k>j|Nm^ z7fBwdN_mgS@%!TmrHjFM`CV*{!iJIeuVB2DX1`8MRtqY>Y!oh2-8-Ff@ResauhREm z!(xd9-1x69DC{HUF^q=D;RvJ-mAXtGXY$!6?EbyubQFHA2Nm)Z?o%k#Nu)f7DXT6?lv#5h*g7ZnP|I^+>1?%K%s&~|$wD@?di*1)Y z{av_~Wr(aQhrB@C47Aol;=I4>&4b)%cOTy&=Vtm&DqcA9d>fp(a8@1YcpFoK)^oe`qsC?JhwVDbM2SA2w;4}VezjpylilTlz4;Bs!D@3Zh) z2$LGA1ijtA*ooJvUep%lXs46k+8|8X3gwhrh4?m@PH?;NKO~H9AO6V=YT_2#;ZZL_ zjnuGfH&j}PEjt6r4**Nlj7w}w8Tu(_1a0r#0-^`>)7UikkGvqwrnYUd_II~GIQi2H zhak zSZ=aQiHP7Hdv+HE_OPJ4tIE|gVsq|dO0!)@crJ+usJ#tZs+wH+R34Vs`S<3MtF>3N z4;Kv@cc!Pk=h1P}iSK@TE;Qj-8+k8zVkM1T@>B~doGT>c`ZbF_B{84%GVccx%1;E@ ztaC42ii(WY*|_d*5pe@HqeLyYG%7enN{gPxGn{bSbGp>BFuLzx&b_8+w8p-9DH$*M z0kj5BRb(V?M0JuWP9%y~Ai^T^%gR;yL(nE6mpO(F==cd0s@gpPp|;Ey62Hzy6lw~P*#p{Mx^p3OupGESSpu1MnI9G}i6G_=530MS?drUB!aQQaH2wBL~)YbVk1~Y*ZVsl!m?)%8V z&=3Y>hoCHSThsLINIE+QA|?+7Yqclam%Ev8XrZ*akAcpo2@8(|ls0B|bv@};f%BFN z&<4D0%)i(C3kBHnFz{y_K#GF8&HE@E*x;#g^bow6`^ZIuHT{rG-n6NrO%zi+k)2ts zP61hdwfR={q5^&=ZGufxR%%D};$htS#kI}51}8t&ZkdIE|0)fX7NNy*$6~TSRBWhUZG=~Kj);{G>=hT zzg^%h8JFPDbV`6i#-8Fuy0r(>*-TC#j&)S+Rk}ed`pqI6i&2VeK_4|&(N`>S3~qNJ z=yf)dO*0Fg|0Inn;>|JX!7*{7Dv`Ed2_Dy4lGQqt95tn-vp@QPjT9u6Ib_kl!BchS`RG-yZINQa=VEgt#;XYWyZD)L@B}%^l%d$wJyW54@ z0z?CTRRVMhZQB2w3`DcTxBH96+Jp;}!5ax6hP-WC4B#FOC3$VwZ7skH6VI(p%)57g z(^No00e0_j>lOr2u9MTYfKi^R81?mPME#=%gS!YdFdBwZP(wRqz0x~p00O&X9G5Nt z;P3A>cL7*;?==X$)s|h5TuueH96(|iqg1PV^iKzmfGZ0GQTxp0R}6X4)qBDqmwksi zHeO-2EA-01cq<^=&Rdw zgD_xt%nC3hmGJr@J#gf|QIi(RGEN0-Jzh(v%p7W<1QAhtZeC2#vB=4#BY4oQBx3^y;J_^?bq#6}k3v%2f+PLi84;h35^lO;j5xe{ew@?8g zYg)ZsnTgcBCrp_C9&t4`XJ;3k_(6L`qw^}YSr7a8$?SUN9p3nifr*NcXFCqtWwLjP zwREsr4$9nqa7@03K(LH58%N!(jKrJdiT~7Moow*-7 z?_F2t|DystP zw>$})e(YnO%Pb#`O{iE0y<|XN7Qcz#%cgu&o*97FYpD_j#pG2v!h=chDw2Shf_r)75C>;t`{^i@ybiJCT=Mw4kQjwgW29l^IuovsEoWvt zF^+MwNwmZ>JEp|7ku2kX^{)^-d!2-31Dl8rQQdi8yCKGNT6P@0PlE|3?eDgr#;7*- zE*LnuM{(GBMC{wYH};~w25N~dr%VI2Hv|v#aiQn6@fv|l&#n>y!@oEB&tqwUM&s}C zJ-xkvjy~AAyY2pQ`TrlFq*Quwx(TPZHb0>jIoa8%fBr8#39iNOj*LG( zs(pUQ9d0o-kg!1Q3B!GVX28Dk1(L5~*xi)yXq=8YGW zy`-chU-I)&@3p~EzI3X=4**RNq@3zT-t(!IBt@gi*{*Qb#{4RhPzs1{Q`J(}`fU#oYR zmo-p4@qWSSdo(R7H2Me+PYx|F!CGykI;w^~+Lp1TK(h zGlDS43N<>h!FJ%&^Ege+VWeEz*)2!XyV3LAQ9%=JM+qe@@ymvx1@;YY^ddZI=V~{U ztH}jcHrgG6PmTn|4|OA?DS5;c3<2TBgfr*Cc^gLRHnx+M8BH@CN%A3+8J@#Tf^R&B zlW))pTZ~Z(C@UfsiP-DtV8bX0BRGdz0j6gvG*c3~_g$p#U$Qa{LyT3N_SKScD)9MT z+V|^nJ$)xg3_u0e6mq&MFfbd^S0OPUzEEz;XF%nFzdil<8+ajN z6Q5RIUcRWJ)t&c4^iObO?qgs=Aw?6uevx+@IKNFYwN>F<1yQ%bS&8ivZ--nwx@H3g z?7j0-+XUyA%;53k$Ny7~r8CXbB$_}a0Wu~6+eWL7uWpX3{k(Sf4~F_*KLUOZQ{rYm9uP%>Y>f&T z@h)Qk(PBzw{rw~tn25FV)VUu3pfyZSPpk5DefbhIM0vmaFb%hmtlailko)5PB6UIr zHNySmv-9K7eYv}v#Fc}y$vZ@L`>9$%FXDo&A1m?-w@-pOr)-jTSF-qezmGPpHXi3X z8hPS|szl2!MaI}-u>-)P&a!#A+TU$5fci5ePlQ@F-LBHVT=WYwtBKUh6i= 
ze<|d-4XxmA&1fEL@zi5}pR0Us$W`3Cm8P^bNa>_YQANor zN*5;V(dK)0rFg|O9OfB0&t0P2&RO>!5i6T!j;vF<7mPRHSzbx%G7i-zRs~O0E*q zg(4|FTXeng)C+rk*T86qVRlw@U_y-eTCOKZ^r%9000ki>IvccoFgrv6`ipFPndS8O z|H*qwPh6r!=q5U-mkxeloAXYzZIeyLNL=zg+Iy_t`odex1Gqd9b4YXW}u5W?c z&CkiPhcOCccl*Uc|J4YBj9;sJeLO@Ic^R(9ia#Nxf7H48I8(Ce$A@}s_>07(Y- zkK#`dZD^(<(U5Pyr~;|l=Kn%#zx<*=j0p@{lDBH0K?FBXfa+`%sI=iuu*SVDFNZUB zsR_Bg%h%C9-Wp8uROzy1ES*9xpfhIs*`A ze<$w=4mUS94k^cV$V#*{Fn|C>-G4v45Jx@YsO%uW0;QFzau{;yzws1UK)u)$j_u7_ zPHiG}O2PDQ2n{)_jH?|03AX>?7@=}#2&SQ@69FBe&M}8kR#XbFsqfu(1AHOpF0STM zAV^m?t4B8$feEAz8`{t1eHV&sk_1Zo-*Q+#zfHJEP)BTlNEBXts~n8V?u2>*^Q=SL zw70A03nA#_sc1aN_559&wG}P5R{N;@EC}ZROU=59DK2(~DKTua00X_sl_nT&qxKKd z(UwkrgYE;i@eBX-+f>mAhc;Fq~Sv!S)+C1M7M9^MlN*W z0ULOx0OPohmO{?0I8HlqhmM)sv2OpMlTDxwXu@}tSj&^O^XAb3f4zBtAzvc79LjLH z?j=PKkQt3e#GV`_fE?g|U;n3Bz2yxKnM*XwsX!%wPKQqvOC?|(|M1T{f+LpJ$S@R# z%2qEH-tq7PI1K_c*VVQD@x$2o)^4B_){I3zHJAqOed^#J)PlG{bF#r-@e4OpW4`o+ zzyMEycl;ffLygR4X`N$pn2n!^m6DuMItfqNB4wRuB@#}Ko|bX&d@Mr_ZiCi%FZ%t) zYp?Z58fP{`Rf9yUC|kFw6qTML<5%`U2_R_IR(vAJl)65%_b+b!v&9mQfiL3vKOdwI zbiLE>WhH)2;Ilwkk)Rg0(u4gxt*--ln+*{+&5L?dbV3V{pgI_I)pePsef6c3E@*L# zie;vMMQf<5Pug0cuoy=g16zBv+LOfll3thf*yhd=?x)*~l<|}*(2gfc(Y=(OxRp%L zOA2Ll#OLSPS2ye64kUWw%R=j|+!w*Aa-@iyZG)5`+&7h2&Llk&8571;2aoX+K<`Rm z%$fT&Eo$*~jcM#%2#*0Tp4ef4Syv(nq^^Z638hkHF9gPVK@0atB>7#4zUv4ovSie* zu5m6`Jux``8l7yKz>^4Dk<>Z~BWT;*_GJvAL2Q>g&g8XEW2y#wJr#vX$6W`lOEp~Z z&1bq2uVK=$*QuTotM5MzF!K^d+>J-0I--~h#i^i-k&KsaCTOe(-gsF*n2|8ntXUhGHmu);D`S4;7`FAng@q(dpjSKM0Ww2mT z18PB@z6_&^H$Y`j9MZm*#q6^e`45hnNSrsPj{7|dodeX_I#QeP)8p>_L+@O)x_Q$5 z56f3d{l|etu9Vkd2VEV|2f!M>^V^uSgfypGpyNQZVEZ2&6{-$@J~UY%^`bg0VaRHb z=pT`qKy(MeNk?3ZoQO(&2fuj~UiUY;ff1G%_9Ou+I1PT;-|OiiXsWBM~rLp$5ZMUv~|1!6>lnqlq;Br0I+a5 zwMMUkifmXT`p^4s&Dz-XhWrqs=Q_veN>joa>&@FP3adac0{E-Nl*awL(7wltlF17& zvkxraKSO7$EFuN}G<+##bL5k^-=)Lc1^a(l-9V&+qZU6X;5HQecD20{Rz+F+gN$F< zS^0-Ko$*N=J=}L0vP4$_9)o%EZ$aA#;zB8ybUQQl;0&8nu}P3>gjfrWZbYDrjml%C zE~#{SW)KFw9p?Mc8ZYc|;mw6P}I&@xCjOW9n|m^`SL zOKTk}a*;=nvOItAvIDz|PQ6pD@N`GBSZ3~5v~Z+81T3GJ5q*>2LC<}HKY zSgp*n??FUq)TI^HG>P_S6reMy3h}&MBQ&T%*{@8zHqIn$gVgRkDj?mJYoT#FiYTbVxH9R$uf}4T0Esp4EUiq*Cm4eRB z<>6tgqM2q@VAr6FZQ+PILv?qFlZz8*0!m29IY3d^ioGX=7ji`5v2e63b{@>Rn=N1y z)KIKGEo+5{lCLdQ>B~y*M+3kre~f zL?2rysE5FIK|%a|1rL@msvU2X7^MeOMy@B8E$^02x+*Qs0Et5|mAT$z$tn9S{di>} zQ;Ji~GM5oHLMp(Wq_LENpv2u69jEjzm`WhN!msdF$}50lCyJx{l@6EK_4m;3Qm=_` za#sOqOaYYv=oNG@HF9D`Y?75XuN>wd@aT!L3H&g1?9a{o<*PnCfAZZ+_pHGx1!CYK zsG#{@N0p_}q)8Z&12aW^f6jDfCGh{Lr+GGY3jFERwh#^iHOE5JiqppqQA04FPHY!T zeSh5%Ki^(sR1J(zjfcr-Fr(1Ib1VRwQ?Vm7F?YRxKh=Yj*uuI@!w660NLLp~C91`m z*0;WZPL$;LVf#+|UOO+QPCkr&YsQyCrenhm0dBaKMCD zH{}*;Oz-@~?+Wb)cvm{`g{93zd`u^cBhnVX#RI@}Fj#3k(X3 zHcW_m!%D<@6rFTX`WS@z;`VY?NOxqsF~!OT`>GmYD<6p(F`z3PXENaNfYvIivE7m< ze4KI`9%EbkBRwSEf`_ZI-8A%42p*Y17)LW+o;;lj4b~z?FiXUm7<}HJ=^ExE?z%h~ zjk_Kzl6uNCntZ<`y*o9s$_*BPDFE%Mqku1v%@_cuO;My`GM zMY(osuzDdx;=g&hCWZ|m(?8;kv4ldag@0%{vd*sy&r!bPs}r?dM<-mckAIwzN5vf{ zCwf@f;)FGg_D-g-M5Vc~rtBW9rdokg8FP+Nd@V7gNxepave#3BZ!<<&sB$_asz|@q zDfvTgg{RLFNTjEU9ARKK^(tOrA1|W204`o#oh=JaD|uC2Dxn$QIDe znu=H3i@9ccY`-IV>HE|BUd$uce2L!+3+nFn`fX(9;kpcc{d{M0UM(##{(kTak9C&; zg;cMlNTV|A*&tNk7fJnTwJcCnDzy67qJLUiY-ZRp{UFX}X77Bnw<0j`K)G{s|N2<% zJ>W0J+*|(HwW&Fi!%Z^7ATL14+&O^X}@#qCo`Bd+Dv zso^sL6798Filt71&YWS5c~|hKXj7JjTmzp){)%ReuaeAU1PRlq-|6PugL&8u|3%X) zs_F()Q39##3gn>iDw@~H@5lwF>dD@cjsgDP(RvJS)9_qsVNRp2@t9@7tIa=6uOu5e z)cc6)8IFj_IE|?uPb+wScL=SW`C7Aonca4X6nTG)*q~R6^-5H|+C?g$*l?b~c>_f_ z#Wsr0Fh}!JI#a%!61Hf(OZr&5-0BoL@0G?XDl&?OFzGal%np|E3e|$=t0jQBdQP2v zrK-e?%TU$6ToR0@G-VC1#fU@^#(57bZH~XQ2*i!sp8G6laX-?VQPk;-E~<#ch#R;Y 
zi~`!PYMdhLk51zb5wf616(JkR^8Ur*RJYDF`{*0h04AJaWUz(If!`W>Bbg1+6f}_6 zPn=l_=`>J(g%|<`J5=~As~g`%Y1!~ga_ie*@!J|YytO%$rGUZg(!eK`&-txwZQNek z2&b6I+`6SNIABrT1E)ys$J)@Vp@AP66OndlNDY)irF`K;x!c39BdJOQ^Ic@_U5X>i z3Osa+lx@f8lr3vhQcs%s&zk+4%VS=a30r=3>y6~DXRaY&(@{-V1S7Tnt?1rON=KGQfP+5 zQ=OaGsE3v`K3M`H1F20vC||5h)19+&*NWSQN0@T_x*}bi(TG( z1yGPRt}P-Lgb5-L?Il#FB=tnxWom7`I2sxkSm?BC`$i)W4Sr=jKyVvh`)Yf?e31m^ z^;W^V>596IJEWIX@CODa0Xn9>iCqVs3@a#Lw6s1!-W65d2ry7;Ajr4hf_ldPkc=Hj z5I5FFw|qZe+^;4`MP2|R@}R!Wvwdczw+Bl4Ed5dfVeY;7y~!4Z^|8ZLcX9xWML*zU z07f6}gt!i>C*tZBSIUtin$Uy6%*_|xqWN}gSPJS`A zWNS6yW6YVaEl_bbDchrc`h(6&No@g{>e;8P2mvC{l<+&gn_^bF8aM-#^Z+;|qgen~ zoG)(XQ&{8-|JznNi3R0C3cz2m`W_vzg7j?YnEBE{kq=*v4YCh1uwY8+8W6`R@To0s zP*)bx^DfDRIx`if`t`VNfc;_+M4wSCcqNXEFcQpt`V)lJxBgwnFG<`s`0EtB*-`lF zpjVFbE(24?u|ueJRSk^;^tSU(4i1QPkZ49MJlxL--@NitH^7GgBHGU1c5uke$x++< z`}_CX^j5kGV|cF`8XC^VKR@j2w@nJvOmA%xwm8ACMSoJ6XNxUNnJVmuxwAik4?B?# zYQ9Yh&JVkC*$#MYt<7}m#oyii5GOJr@Q}>R3)n!D=qjqMmP>3hy!& zq}it}FqMjhUGbaFKAXUId;%kT1!`xOP0~#m$OTZMlg^$Odt_u}7?u9o=U*d7|9}BN zL=|i644lHg1D~7kjMnzOq% z;)$Z~U`?amKiIi3vARyhdwrjC%}ds&aM8uY2(5PJxFon3{@+{{0s19381bVnQ{cb|h6bsAhHcjmNNC9~3l*+osCw z#NLs^L6{4JdZ|N!Mn-(W#6ztZKr^LL@k>vnyzjavmV)zLDy7D80!T_VxWV(mziZ-6 zSWvkE`d>HwoSuzaRcw z$L`O(?nUWgidfNP%IIM3y;<8bHB27GbfA z<;thzH+B^=rWI=4l1~qE^e!YMWy12I(I@9-dr=G9&lh{7lS#bCBAb!SmWPs=kpp+q1;)Ru_qj`H>etwa2Q4$U$;I5 zBABG-&&lihaB6^al?b;nCSoW|C2ED=P_?zA+AsfbhXaLRYO=W&*SnvvcrJ8w8z8{G)W3lwV&VgC;maG9@J9L0^3lSrVC0&o7O160SsuI+XP=x{b`Cn0bW$f==8bW z+5rm>W7nC!bN4>r8mxicX-(mR9S$B&NT`yBtTh1U-xU`VK{8E&BTU+91Os4%4LlgI zJ#Xkz-$I@iSl+c0#d#@u;D3V(u?}OkK92nWPi!+p0QdHV;3_T?SkK+=H%6uO43@%( z6-`)^&H{|H3-FO%fc=t00O+(vF2th9rj|?E=EU06!~p!!WeqmfN_{l+}HP%I$|6q?H-SB z4+{@x326iag%gO3?CrUuTOor4CcOXp_3PN7W|$xY|3GN($&jk@Y z=gys#8`R0TfEhzn~dr=?_N2fMqwhlY}khE5-$DJd!OT^ZYd?)k#EZxK;Zx>v3g zfx{ode3@8SCJ1-I^-LxGq?}TLq_?!RR8^(9WCx-3Fn`T+pELXb$;;4_;o93L$P~y? 
z7C&?5B2D7+=fbgVoxXA3bL?rxsvLE20(5^;^MTWg1Z9*=3N>^1lv$;i1i8+>XLWTI zzG2wbME!S)2>KvJ6BcN6Uh50Vr->e+xw~%#TFZU@@Nq%Tr!&`q10gWH*UsE?ZRrkQ zbPz%02#txU>C`<6q#PtCC;#}iPGJkR`h4);zq7Kk63Q?{VKz2203gdJB0|UgrKcz0 zi1^vFjcY{=3=FO;$rJLBFVo07aPQu|W5Vv# zV`Bjn|Md(0K#3-&XZFjNFCY~I$_+MPWb*{~;}!2sSn@Y+-n@DJ`VVRnH#_^REEGM@ zSYAH9;Kp&N5AGfwgs-nI$iImaQ-v5x^DES%UwWdfVNLb~17&sPd#dD76fLU2qrOy7f^ z1Lo;@j5qxC@=D9tT_IuNPoF+rzI3T}W64xSr8z~`!xw542ow@)t@rPEV3=r<5ACnm zB>`DkSs5AjEwm;gGAb(S?o*%_xF5TY_KPrUj24FnLAH&ItSl50X$6He?N_2OY#3r{ zV0y&9dJ*f5Bs_DOv>9D8+p&4KQAdMDJ*4u6lsNu4+;8KTAqNLXu~DcY^N}ABUXKqvombA%U6`LQD@fPP z0dCvdc`SSn^_7WmJr0MYdzdc|*@U^q3Rh0YbFi^}7rn4!M}jLS((?y&4q26ztT=_w zc9P@ep`)!Pr)ao6ukPOQ-pEwqbwPn6Y*PaRgLCK3fsD0YftH5G2zc`=S0WZUObfZ^ zx}hb!eEG5YQCL(cwz0Wi=N58QrdrrnXK;JtE$|1}&ZVu$wZ`T*JC z;m2i_JO1iOm0JVqZH2Wiy4)l*EG#JC5w#M00Xh-YkwAo&_tv?u0|R4BU*TJo3!s>m zS5%DF`ElV5{oF6T1RL?%@bK{Y=IWO(Z=liAr^EhNQCA)g_1cEDlO@O2kZ2|ep>oOA zgjBXu85$ZA@^O?DVPq*B#>rY~vWw(Qh~a28WXn=2B^nhcg@%Sww48)|_fOaPzW-dV zF@DSYKF|H!@AKTZK}g>Er%z(WjY(^9_`UI%>%M&t`U6N3e?SN%Qb^ysw@Vl-b*k_g z3D5c}?(D~?`z!!cDX)CJzP?;No8utFNU}7E^mcS~YP6z$z~w4{xumEl=@j=kF8+{M z+#LXA8P#9a&SBWu#Ms!8PM4FH?{7t2ish=1#F=XCKKy#Nkgin^BnMjo7K??B-CVOM zm0R%PjsjRi0CSlsEk$MNHUri>Wki$D=M#kau3asPc35^y@q)v?(4OACJH5@TaMrwj z)6*vu1oe$4MMM}UPEb=U|8wmHUWlco9Xqeu>pvT+W8v!EHah>6Ok%kBA zRI2jYdo4~CZu@Dpj%Ux7tEkkWP#+l?;ryCW2iPrIK%5}LB?t=^SF;5jA*rVkof^cS z_4Ym(9FK{OO-W8JcdH%w@B!#A9Snj1wT_s$INO$kR_mQMHscc$N)svE<9b=|AkT$x z-*eSlJkElLkn;-l7lwWn$xoj@|JJ&~AJJ1^OUrij3Nh7a%h6lE-k<*11#CdfB5^{* z#78(?5jArd5-tEN#Dx{-T@p-MtR#K=a5je}9XD}4DhMvg$*Kw4J9~RKBMwlWP%NxD z4qGCppWnF?V^Q#SlL?m3Y`(P`@-E+XjYh_Un(FFRI*6Rf%PJXVG>y!i>XlxHJUq;` z750?SDK(of<9$_Nd;yZlPTC5xItd90s8$x4M#7Ty?9`jP2TqvY>9WFQ)K0P)x_iki z2}#Xg|F+5=J~bWS;i`xrqsDIgXozEt+tgs$KA5}DZJa7{V(~8;a!X?yJKh_6B?fK@ zM~wCN_jh#Uqh;op_(mg``dr5n{`|Swxf?MkGV%v<+1udoJ$u+(uFjY`yA4_;IY1@` zIcO$KDZNN$0lt;H@;YCHlCF_8<;B$L-qJ>U@lir$q#SLm?Lx_pQ4P6jQo7nqsqEZ zH_o5>w*yjS+6gfTYpt)Z535Wd5M-NS5D62vyc3gpZc_ilk(Er>0vcR2Ue-n!>gtZ6 zeG?^4<9js!11k;&^2W!_s&gnR*b9stM}A1^vR zE2WSmFE7tWZbd0lK!fc1G8L-Ph7BFv-9v+eT^$|K(pM@fh69Y8oSf|K?eWqS6%|*r zOhP>V?THs@@W*Cgf|fzqJln9!(C`)t>ibPi3&_nL8{*qO-o8yW&5sgxu920MWu4{m zjLYS5`_HgcQE6n^a=^$nwYTqDB~5~UPtT3gs;cK*U9#lsAiLlf@_Gt(YWPv|LzAtY z-Q5&$D18`qNRWp1J0Hbgd_(-I@k_DL(#y%rY;I}6Q=0+l<7W^71_bc7zrSC!0N!r^ z)-anoSw*w*&Q^3Rp_5s~i|z7P{`3XkU~i))bG!ZV{rkSxovBFN->0U8!PRkbEt!jq zWFQAZ)7ZHSI(&pEZf2E{@*@btE3H_85doNsMw*$`fBzoN-Q@td1;}}4wc9RKR1ike z(bcsS?e8Q@N=iDE(AC+znwmjV1qFrX+a`?w09t7S|MBq|Mn@q4UUs%poW;h#>t1>< z?%fl0kNAm+*+oki#&mZ(@mx(^iwg&+K5`j9?aM>Mkf_QyoH!$nLkUEGA|oTK4u%1z zfpt2^W+OWyL0K|~A*X~7G8hbL?}*_2s@rpwNFNBi34wJxK7m&YsFrOepVWe{s@@Br`$QO=JNBrva{{@eyu2Kr5GA7y z(0+{182vsCKvhB?DfS%u3AgS3A>j5O;>>pqbH3}LVLasy<4BF4B~B1SH@z7i4l+_U$VstVuFWtn>SC_Vch*kt9t%!x^U{)A%5@p5 zqGX$|3J3{_^cC5$V+WcUot>SH_N;y1Uq^>OQSpFxiift1x_j3!(Lm~e`OF)Co}%J-&b&C`&?bY z#CsOd82k3^qtmhVt7~cyOP=Fhxw`tneR^<^otvvHCwD!d-$EgY-`R=y8aCJ}shg3L zm#3nr*w!C_O-2Z>JbbuQE6s($_(lxdqrC_yTunw(PcIMzIdXbvR8?Hu4&0u*`zs9I zAV44g(D#05$iU~3@{%P>I1X8CcGKWEA^yCYh02a-WI{I-g{7ybhYBSyC@7yA76^$f zf-?sNvK2gKQqqHS@Kf`;ib@FE;gmSPw>P4!Zu=(OqKKQP(OND;yR`^Q1>$5G>)l(6 zPME5aA*EO5R0@A9(1Gx3`HCF^#}eFsMtb_d(Er#eC3K3i+12Zfm03EJCYXP_X|vlkX1f?`UK;X(Sd<;d3ohdr9nYK zZC-&#j~;~qhO28Xj~90GKlnB+ZuJ(_=f`-F5p2a-u~6Y4RpG4?w=^TPH08dBo@! 
From 33f1812d482aca23ae7115b11e67156e6875334e Mon Sep 17 00:00:00 2001
From: meows
Date: Thu, 24 Sep 2020 15:52:49 -0500
Subject: [PATCH 094/105] params: fix etc mainnet MESS activation number

Signed-off-by: meows
---
 params/config_classic.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/params/config_classic.go b/params/config_classic.go
index 904a845400..30a271a909 100644
--- a/params/config_classic.go
+++ b/params/config_classic.go
@@ -75,7 +75,7 @@ var (
 		ECIP1017EraRounds:  big.NewInt(5000000),
 		ECIP1010PauseBlock: big.NewInt(3000000),
 		ECIP1010Length:     big.NewInt(2000000),
-		ECBP1100FBlock:     big.NewInt(11_377_500), // ETA 30 Sept 2020
+		ECBP1100FBlock:     big.NewInt(11_317_400), // ETA 1 October 2020
 		RequireBlockHashes: map[uint64]common.Hash{
 			1920000: common.HexToHash("0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f"),
 			2500000: common.HexToHash("0xca12c63534f565899681965528d536c52cb05b7c48e269c2a6cb77ad864d878a"),

From 427bf3b3c61abb5c4c48de69b372bac692d5a51e Mon Sep 17 00:00:00 2001
From: meows
Date: Thu, 24 Sep 2020 16:01:40 -0500
Subject: [PATCH 095/105] core: only calc prettyRatio when needed

Signed-off-by: meows
---
 core/blockchain_af.go | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/core/blockchain_af.go b/core/blockchain_af.go
index 0c5abee7e7..f143dbf490 100644
--- a/core/blockchain_af.go
+++ b/core/blockchain_af.go
@@ -92,12 +92,11 @@ func (bc *BlockChain) ecbp1100(commonAncestor, current, proposed *types.Header)
 
 	got := new(big.Int).Mul(proposedSubchainTD, ecbp1100PolynomialVCurveFunctionDenominator)
 
-	prettyRatio, _ := new(big.Float).Quo(
-		new(big.Float).SetInt(got),
-		new(big.Float).SetInt(want),
-	).Float64()
-
 	if got.Cmp(want) < 0 {
+		prettyRatio, _ := new(big.Float).Quo(
+			new(big.Float).SetInt(got),
+			new(big.Float).SetInt(want),
+		).Float64()
 		return fmt.Errorf(`%w: ECBP1100-MESS 🔒 status=rejected age=%v current.span=%v proposed.span=%v tdr/gravity=%0.6f common.bno=%d common.hash=%s current.bno=%d current.hash=%s proposed.bno=%d proposed.hash=%s`,
 			errReorgFinality,
 			common.PrettyAge(time.Unix(int64(commonAncestor.Time), 0)),

From c15eecb1b4edf5c41b2033f2a11179113ded484a Mon Sep 17 00:00:00 2001
From: meows
Date: Thu, 24 Sep 2020 16:04:26 -0500
Subject: [PATCH 096/105] eth: bump stale-check safety interval to 30*13s

Signed-off-by: meows
---
 eth/sync.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/eth/sync.go b/eth/sync.go
index da9456d8c9..e7584adbac 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -49,7 +49,7 @@ var (
 	// artificialFinalitySafetyInterval defines the interval at which the local head is checked for staleness.
 	// If the head is found to be stale across this interval, artificial finality features are disabled.
 	// This prevents an abandoned victim of an eclipse attack from being forever destitute.
- artificialFinalitySafetyInterval = time.Second * time.Duration(10*vars.DurationLimit.Uint64()) + artificialFinalitySafetyInterval = time.Second * time.Duration(30*vars.DurationLimit.Uint64()) ) type txsync struct { From 15d4bf9c4eb5504ab5bd2bd2fd10454c6030c1ce Mon Sep 17 00:00:00 2001 From: meows Date: Thu, 24 Sep 2020 16:05:18 -0500 Subject: [PATCH 097/105] eth: bump MESS minpeers safety to 5 (defaultMinSyncPeers) Signed-off-by: meows --- eth/sync.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/sync.go b/eth/sync.go index e7584adbac..7ede248820 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -44,7 +44,7 @@ var ( // minArtificialFinalityPeers defines the minimum number of peers our node must be connected // to in order to enable artificial finality features. // A minimum number of peer connections mitigates the risk of lower-powered eclipse attacks. - minArtificialFinalityPeers = defaultMinSyncPeers * 2 + minArtificialFinalityPeers = defaultMinSyncPeers // artificialFinalitySafetyInterval defines the interval at which the local head is checked for staleness. // If the head is found to be stale across this interval, artificial finality features are disabled. From 5b63eb5914e567be593ca0652e234ccc52a38096 Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 25 Sep 2020 05:43:29 -0500 Subject: [PATCH 098/105] eth,goethereum: add test for protocolman AF features enable/disable Signed-off-by: meows --- eth/sync_test.go | 62 +++++++++++++++++++ params/types/goethereum/goethereum.go | 3 + .../goethereum/goethereum_configurator.go | 8 +-- 3 files changed, 68 insertions(+), 5 deletions(-) diff --git a/eth/sync_test.go b/eth/sync_test.go index ac1e5fad1b..0e699ce78f 100644 --- a/eth/sync_test.go +++ b/eth/sync_test.go @@ -17,6 +17,7 @@ package eth import ( + "fmt" "sync/atomic" "testing" "time" @@ -62,3 +63,64 @@ func testFastSyncDisabling(t *testing.T, protocol int) { t.Fatalf("fast sync not disabled after successful synchronisation") } } + +func TestArtificialFinalityFeatureEnablingDisabling(t *testing.T) { + // Create a full protocol manager, check that fast sync gets disabled + a, _ := newTestProtocolManagerMust(t, downloader.FastSync, 1024, nil, nil) + if atomic.LoadUint32(&a.fastSync) == 1 { + t.Fatalf("fast sync not disabled on non-empty blockchain") + } + + one := uint64(1) + a.blockchain.Config().SetECBP1100Transition(&one) + + oMinAFPeers := minArtificialFinalityPeers + defer func() { + // Clean up after, resetting global default to original valu. 
+ minArtificialFinalityPeers = oMinAFPeers + }() + minArtificialFinalityPeers = 1 + + // Create a full protocol manager, check that fast sync gets disabled + b, _ := newTestProtocolManagerMust(t, downloader.FastSync, 0, nil, nil) + if atomic.LoadUint32(&b.fastSync) == 0 { + t.Fatalf("fast sync disabled on pristine blockchain") + } + b.blockchain.Config().SetECBP1100Transition(&one) + // b.chainSync.forced = true + + io1, io2 := p2p.MsgPipe() + go a.handle(a.newPeer(65, p2p.NewPeer(enode.ID{}, fmt.Sprintf("peer-b"), nil), io2, a.txpool.Get)) + go b.handle(b.newPeer(65, p2p.NewPeer(enode.ID{}, fmt.Sprintf("peer-a"), nil), io1, b.txpool.Get)) + time.Sleep(250 * time.Millisecond) + + op := peerToSyncOp(downloader.FullSync, b.peers.BestPeer()) + if err := b.doSync(op); err != nil { + t.Fatalf("sync failed: %v", err) + } + + b.chainSync.forced = true + next := b.chainSync.nextSyncOp() + if next != nil { + t.Fatal("non-nil next sync op") + } + if !b.blockchain.Config().IsEnabled(b.blockchain.Config().GetECBP1100Transition, b.blockchain.CurrentBlock().Number()) { + t.Error("AF feature not configured") + } + if !b.blockchain.IsArtificialFinalityEnabled() { + t.Error("AF not enabled") + } + + // Set the value back to default (more than 1). + minArtificialFinalityPeers = oMinAFPeers + + // Next sync op will unset AF because manager only has 1 peer. + b.chainSync.forced = true + next = b.chainSync.nextSyncOp() + if next != nil { + t.Fatal("non-nil next sync op") + } + if b.blockchain.IsArtificialFinalityEnabled() { + t.Error("AF not disabled") + } +} diff --git a/params/types/goethereum/goethereum.go b/params/types/goethereum/goethereum.go index 35fb77eae4..7b3af6fc4e 100644 --- a/params/types/goethereum/goethereum.go +++ b/params/types/goethereum/goethereum.go @@ -70,6 +70,9 @@ type ChainConfig struct { EIP1706Transition *big.Int `json:"-"` ECIP1080Transition *big.Int `json:"-"` + + // Cache types for use with testing, but will not show up in config API. + ecbp1100Transition *big.Int `json:"-"` } // String implements the fmt.Stringer interface. diff --git a/params/types/goethereum/goethereum_configurator.go b/params/types/goethereum/goethereum_configurator.go index 7a526b1ade..df53e7c99a 100644 --- a/params/types/goethereum/goethereum_configurator.go +++ b/params/types/goethereum/goethereum_configurator.go @@ -392,14 +392,12 @@ func (c *ChainConfig) SetEIP2537Transition(n *uint64) error { } func (c *ChainConfig) GetECBP1100Transition() *uint64 { - return nil + return bigNewU64(c.ecbp1100Transition) } func (c *ChainConfig) SetECBP1100Transition(n *uint64) error { - if n == nil { - return nil - } - return ctypes.ErrUnsupportedConfigFatal + c.ecbp1100Transition = setBig(c.ecbp1100Transition, n) + return nil } func (c *ChainConfig) IsEnabled(fn func() *uint64, n *big.Int) bool { From 36bb18e8cb5a636fd16fb5be0ebdb2f0f4ebad1c Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 25 Sep 2020 06:10:19 -0500 Subject: [PATCH 099/105] core: remove vestigal int64 polynomial fn, rm emath import This just keeps things lean and mean. 
The int64 was a precursor to the function which is now using big.Ints Signed-off-by: meows --- core/blockchain_af.go | 30 ++++++++---------------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index f143dbf490..abff862204 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -9,7 +9,6 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - emath "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" ) @@ -138,11 +137,15 @@ func ecbp1100PolynomialV(x *big.Int) *big.Int { // if x > xcap: // x = xcap - xA := big.NewInt(0) - xA.Set(emath.BigMin(x, ecbp1100PolynomialVXCap)) + xA := new(big.Int).Set(x) + if xA.Cmp(ecbp1100PolynomialVXCap) > 0 { + xA.Set(ecbp1100PolynomialVXCap) + } - xB := big.NewInt(0) - xB.Set(emath.BigMin(x, ecbp1100PolynomialVXCap)) + xB := new(big.Int).Set(x) + if xB.Cmp(ecbp1100PolynomialVXCap) > 0 { + xB.Set(ecbp1100PolynomialVXCap) + } out := big.NewInt(0) @@ -191,23 +194,6 @@ var ecbp1100PolynomialVAmpl = big.NewInt(15) // height = CURVE_FUNCTION_DENOMINATOR * (ampl * 2) var ecbp1100PolynomialVHeight = new(big.Int).Mul(new(big.Int).Mul(ecbp1100PolynomialVCurveFunctionDenominator, ecbp1100PolynomialVAmpl), big2) -/* -ecbp1100PolynomialVI64 is an int64 implementation of ecbp1100PolynomialV. -*/ -func ecbp1100PolynomialVI64(x int64) int64 { - if x > ecbp1100PolynomialVXCapI64 { - x = ecbp1100PolynomialVXCapI64 - } - return ecbp1100PolynomialVCurveFunctionDenominatorI64 + - ((3*emath.BigPow(x, 2).Int64())-(2*emath.BigPow(x, 3).Int64()/ecbp1100PolynomialVXCapI64))* - ecbp1100PolynomialVHeightI64/(emath.BigPow(ecbp1100PolynomialVXCapI64, 2).Int64()) -} - -var ecbp1100PolynomialVCurveFunctionDenominatorI64 = int64(128) -var ecbp1100PolynomialVXCapI64 = int64(25132) -var ecbp1100PolynomialVAmplI64 = int64(15) -var ecbp1100PolynomialVHeightI64 = ecbp1100PolynomialVCurveFunctionDenominatorI64 * ecbp1100PolynomialVAmplI64 * 2 - /* ecbp1100AGSinusoidalA is a sinusoidal function. From ab57ebbf0269ac19fb8fdf81cc49ffa5531065f5 Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 25 Sep 2020 06:18:44 -0500 Subject: [PATCH 100/105] core: tidy up test file This just rearranges to put importanter stuff on top, and to move the development tests and plotters toward the bottom. 
Signed-off-by: meows --- core/blockchain_af_test.go | 831 ++++++++++++++++++++++--------------- 1 file changed, 498 insertions(+), 333 deletions(-) diff --git a/core/blockchain_af_test.go b/core/blockchain_af_test.go index 82609374e5..ced8f6fc0c 100644 --- a/core/blockchain_af_test.go +++ b/core/blockchain_af_test.go @@ -8,7 +8,9 @@ import ( "math/big" "math/rand" "testing" + "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" @@ -20,9 +22,7 @@ import ( "gonum.org/v1/plot/vg/draw" ) -var yuckyGlobalTestEnableMess = false - -func runMESSTest(t *testing.T, easyL, hardL, caN int, easyT, hardT int64) (hardHead bool, err error) { +func runMESSTest2(t *testing.T, enableMess bool, easyL, hardL, caN int, easyT, hardT int64) (hardHead bool, err error, hard, easy []*types.Block) { // Generate the original common chain segment and the two competing forks engine := ethash.NewFaker() @@ -35,14 +35,14 @@ func runMESSTest(t *testing.T, easyL, hardL, caN int, easyT, hardT int64) (hardH t.Fatal(err) } defer chain.Stop() - chain.EnableArtificialFinality(yuckyGlobalTestEnableMess) + chain.EnableArtificialFinality(enableMess) - easy, _ := GenerateChain(genesis.Config, genesisB, engine, db, easyL, func(i int, b *BlockGen) { + easy, _ = GenerateChain(genesis.Config, genesisB, engine, db, easyL, func(i int, b *BlockGen) { b.SetNonce(types.EncodeNonce(uint64(rand.Int63n(math.MaxInt64)))) b.OffsetTime(easyT) }) commonAncestor := easy[caN-1] - hard, _ := GenerateChain(genesis.Config, commonAncestor, engine, db, hardL, func(i int, b *BlockGen) { + hard, _ = GenerateChain(genesis.Config, commonAncestor, engine, db, hardL, func(i int, b *BlockGen) { b.SetNonce(types.EncodeNonce(uint64(rand.Int63n(math.MaxInt64)))) b.OffsetTime(hardT) }) @@ -55,394 +55,153 @@ func runMESSTest(t *testing.T, easyL, hardL, caN int, easyT, hardT int64) (hardH return } -func TestBlockChain_AF_ECBP1100(t *testing.T) { - t.Skip("These have been disused as of the sinusoidal -> cubic change.") - yuckyGlobalTestEnableMess = true - defer func() { - yuckyGlobalTestEnableMess = false - }() +func TestBlockChain_AF_ECBP1100_2(t *testing.T) { + offsetGreaterDifficulty := int64(-2) // 1..8 = -9..-2 + offsetSameDifficulty := int64(0) // 9..17 = -1..8 + offsetWorseDifficulty := int64(8) // 18.. cases := []struct { easyLen, hardLen, commonAncestorN int easyOffset, hardOffset int64 hardGetsHead, accepted bool }{ - // INDEX=0 - // Hard has insufficient total difficulty / length and is rejected. - { - 5000, 7500, 2500, - 50, -9, - false, false, - }, - // Hard has sufficient total difficulty / length and is accepted. - { - 1000, 7, 995, - 60, 0, - true, true, - }, - // Hard has sufficient total difficulty / length and is accepted. + // NOTE: Random coin tosses involved for equivalent difficulty. + // Short trials for those are skipped. + { - 1000, 7, 995, - 60, 7, - true, true, + 1000, 30, 970, + 0, offsetSameDifficulty, // same difficulty + false, true, }, - // Hard has sufficient total difficulty / length and is accepted. + { 1000, 1, 999, - 30, 1, - true, true, - }, - // Hard has sufficient total difficulty / length and is accepted. - { - 500, 3, 497, - 0, -8, - true, true, - }, - // INDEX=5 - // Hard has sufficient total difficulty / length and is accepted. - { - 500, 4, 496, - 0, -9, - true, true, - }, - // Hard has sufficient total difficulty / length and is accepted. 
- { - 500, 5, 495, - 0, -9, - true, true, - }, - // Hard has sufficient total difficulty / length and is accepted. - { - 500, 6, 494, - 0, -9, - true, true, - }, - // Hard has sufficient total difficulty / length and is accepted. - { - 500, 7, 493, - 0, -9, - true, true, - }, - // Hard has sufficient total difficulty / length and is accepted. - { - 500, 8, 492, - 0, -9, - true, true, - }, - // INDEX=10 - // Hard has sufficient total difficulty / length and is accepted. - { - 500, 9, 491, - 0, -9, - true, true, - }, - // Hard has sufficient total difficulty / length and is accepted. - { - 500, 12, 488, - 0, -9, - true, true, - }, - // Hard has sufficient total difficulty / length and is accepted. - { - 500, 20, 480, - 0, -9, - true, true, - }, - // Hard has sufficient total difficulty / length and is accepted. - { - 500, 40, 460, - 0, -9, - true, true, + 0, offsetWorseDifficulty, // worse! difficulty + false, true, }, - // Hard has sufficient total difficulty / length and is accepted. { - 500, 60, 440, - 0, -9, + 1000, 1, 999, + 0, offsetGreaterDifficulty, // better difficulty true, true, }, - // // INDEX=15 - // Hard has insufficient total difficulty / length and is rejected. - { - 500, 250, 250, - 0, -9, - false, false, - }, - // Hard has insufficient total difficulty / length and is rejected. - { - 500, 250, 250, - 7, -9, - false, false, - }, - // Hard has insufficient total difficulty / length and is rejected. - { - 500, 300, 200, - 13, -9, - false, false, - }, - // Hard has sufficient total difficulty / length and is accepted. { - 500, 200, 300, - 47, -9, + 1000, 5, 995, + 0, offsetGreaterDifficulty, true, true, }, - // Hard has insufficient total difficulty / length and is rejected. - { - 500, 200, 300, - 47, -8, - false, false, - }, - // // INDEX=20 - // Hard has insufficient total difficulty / length and is rejected. - { - 500, 200, 300, - 17, -8, - false, false, - }, - // Hard has insufficient total difficulty / length and is rejected. { - 500, 200, 300, - 7, -8, - false, false, + 1000, 25, 975, + 0, offsetGreaterDifficulty, + false, true, }, - // Hard has insufficient total difficulty / length and is rejected. { - 500, 200, 300, - 0, -8, - false, false, + 1000, 30, 970, + 0, offsetGreaterDifficulty, + false, true, }, - // Hard has insufficient total difficulty / length and is rejected. { - 500, 100, 400, - 0, -7, - false, false, + 1000, 50, 950, + 0, offsetGreaterDifficulty, + false, true, }, - // Hard is accepted, but does not have greater total difficulty, - // and is not set as the chain head. { - 1000, 1, 900, - 60, -9, + 1000, 50, 950, + 0, offsetGreaterDifficulty, false, true, }, - // INDEX=25 - // Hard is shorter, but sufficiently heavier chain, is accepted. 
{ - 500, 100, 390, - 60, -9, + 1000, 1000, 900, + 0, offsetGreaterDifficulty, true, true, }, - } - - for i, c := range cases { - hardHead, err := runMESSTest(t, c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset) - if (err != nil && c.accepted) || (err == nil && !c.accepted) || (hardHead != c.hardGetsHead) { - t.Errorf("case=%d [easy=%d hard=%d ca=%d eo=%d ho=%d] want.accepted=%v want.hardHead=%v got.hardHead=%v err=%v", - i, - c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset, - c.accepted, c.hardGetsHead, hardHead, err) - } - } -} - -func TestBlockChain_AF_ECBP1100_2(t *testing.T) { - yuckyGlobalTestEnableMess = true - defer func() { - yuckyGlobalTestEnableMess = false - }() - - cases := []struct { - easyLen, hardLen, commonAncestorN int - easyOffset, hardOffset int64 - hardGetsHead, accepted bool - }{ - // Random coin tosses involved for equivalent difficulty. - // { - // 1000, 1, 999, - // 0, 0, // -1 offset => 10-1=9 same child difficulty - // false, true, - // }, - // { - // 1000, 3, 997, - // 0, 0, // -1 offset => 10-1=9 same child difficulty - // false, true, - // }, - // { - // 1000, 10, 990, - // 0, 0, // -1 offset => 10-1=9 same child difficulty - // false, true, - // }, { - 1000, 1, 999, - 0, -2, // better difficulty + 1000, 2000, 800, + 0, offsetGreaterDifficulty, true, true, }, { - 1000, 25, 975, - 0, -2, // better difficulty + 1000, 2000, 700, + 0, offsetGreaterDifficulty, true, true, }, { - 1000, 30, 970, - 0, -2, // better difficulty - false, true, - }, - { - 1000, 50, 950, - 0, -5, + 1000, 2000, 700, + 0, offsetGreaterDifficulty, true, true, }, - { - 1000, 50, 950, - 0, -1, - false, true, - }, { 1000, 999, 1, - 0, -9, - true, true, + 0, offsetGreaterDifficulty, + false, true, }, { 1000, 999, 1, - 0, -8, + 0, offsetGreaterDifficulty, false, true, }, { 1000, 500, 500, - 0, -8, - true, true, + 0, offsetGreaterDifficulty, + false, true, }, { 1000, 500, 500, - 0, -7, + 0, offsetGreaterDifficulty, false, true, }, { 1000, 300, 700, - 0, -7, + 0, offsetGreaterDifficulty, false, true, }, + { + 1000, 600, 700, + 0, offsetGreaterDifficulty, + true, true, + }, // Will pass, takes a long time. 
// { // 5000, 4000, 1000, - // 0, -9, + // 0, -2, // true, true, // }, } for i, c := range cases { - hardHead, err := runMESSTest(t, c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset) + hardHead, err, hard, easy := runMESSTest2(t, true, c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset) + + ee, hh := easy[len(easy)-1], hard[len(hard)-1] + rat, _ := new(big.Float).Quo( + new(big.Float).SetInt(hh.Difficulty()), + new(big.Float).SetInt(ee.Difficulty()), + ).Float64() + + logf := fmt.Sprintf("case=%d [easy=%d hard=%d ca=%d eo=%d ho=%d] drat=%0.6f span=%v hardHead(w|g)=%v|%v err=%v", + i, + c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset, + rat, + common.PrettyDuration(time.Second*time.Duration(10*(c.easyLen-c.commonAncestorN))), + c.hardGetsHead, hardHead, err) + if (err != nil && c.accepted) || (err == nil && !c.accepted) || (hardHead != c.hardGetsHead) { - t.Errorf("case=%d [easy=%d hard=%d ca=%d eo=%d ho=%d] want.accepted=%v want.hardHead=%v got.hardHead=%v err=%v", - i, - c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset, - c.accepted, c.hardGetsHead, hardHead, err) + t.Error("FAIL", logf) + } else { + t.Log("PASS", logf) } } } -func TestBlockChain_GenerateMESSPlot(t *testing.T) { - // t.Skip("This test plots graph of chain acceptance for visualization.") - - easyLen := 500 - maxHardLen := 400 +/* +TestAFKnownBlock tests that AF functionality works for chain re-insertions. - generatePlot := func(title, fileName string) { - p, err := plot.New() - if err != nil { - log.Panic(err) - } - p.Title.Text = title - p.X.Label.Text = "Block Depth" - p.Y.Label.Text = "Mode Block Time Offset (10 seconds + y)" +Chain re-insertions use BlockChain.writeKnownBlockAsHead, where first-pass insertions +will hit writeBlockWithState. 
- accepteds := plotter.XYs{} - rejecteds := plotter.XYs{} - sides := plotter.XYs{} - - for i := 1; i <= maxHardLen; i++ { - for j := -9; j <= 8; j++ { - fmt.Println("running", i, j) - hardHead, err := runMESSTest(t, easyLen, i, easyLen-i, 0, int64(j)) - point := plotter.XY{X: float64(i), Y: float64(j)} - if err == nil && hardHead { - accepteds = append(accepteds, point) - } else if err == nil && !hardHead { - sides = append(sides, point) - } else if err != nil { - rejecteds = append(rejecteds, point) - } - - if err != nil { - t.Log(err) - } - } - } - - scatterAccept, _ := plotter.NewScatter(accepteds) - scatterReject, _ := plotter.NewScatter(rejecteds) - scatterSide, _ := plotter.NewScatter(sides) - - pixelWidth := vg.Length(1000) - - scatterAccept.Color = color.RGBA{R: 152, G: 236, B: 161, A: 255} - scatterAccept.Shape = draw.BoxGlyph{} - scatterAccept.Radius = vg.Length((float64(pixelWidth) / float64(maxHardLen)) * 2 / 3) - scatterReject.Color = color.RGBA{R: 236, G: 106, B: 94, A: 255} - scatterReject.Shape = draw.BoxGlyph{} - scatterReject.Radius = vg.Length((float64(pixelWidth) / float64(maxHardLen)) * 2 / 3) - scatterSide.Color = color.RGBA{R: 190, G: 197, B: 236, A: 255} - scatterSide.Shape = draw.BoxGlyph{} - scatterSide.Radius = vg.Length((float64(pixelWidth) / float64(maxHardLen)) * 2 / 3) - - p.Add(scatterAccept) - p.Legend.Add("Accepted", scatterAccept) - p.Add(scatterReject) - p.Legend.Add("Rejected", scatterReject) - p.Add(scatterSide) - p.Legend.Add("Sidechained", scatterSide) - - p.Legend.YOffs = -30 - - err = p.Save(pixelWidth, 300, fileName) - if err != nil { - log.Panic(err) - } - } - yuckyGlobalTestEnableMess = true - defer func() { - yuckyGlobalTestEnableMess = false - }() - baseTitle := fmt.Sprintf("Accept/Reject Reorgs: Relative Time (Difficulty) over Proposed Segment Length (%d-block original chain)", easyLen) - generatePlot(baseTitle, "reorgs-MESS.png") - yuckyGlobalTestEnableMess = false - // generatePlot("WITHOUT MESS: "+baseTitle, "reorgs-noMESS.png") -} - -func TestEcbp1100AGSinusoidalA(t *testing.T) { - cases := []struct { - in, out float64 - }{ - {0, 1}, - {25132, 31}, - } - tolerance := 0.0000001 - for i, c := range cases { - if got := ecbp1100AGSinusoidalA(c.in); got < c.out-tolerance || got > c.out+tolerance { - t.Fatalf("%d: in: %0.6f want: %0.6f got: %0.6f", i, c.in, c.out, got) - } - } -} - -/* -TestAFKnownBlock tests that AF functionality works for chain re-insertions. - -Chain re-insertions use BlockChain.writeKnownBlockAsHead, where first-pass insertions -will hit writeBlockWithState. - -AF needs to be implemented at both sites to prevent re-proposed chains from sidestepping -the AF criteria. -*/ -func TestAFKnownBlock(t *testing.T) { - engine := ethash.NewFaker() +AF needs to be implemented at both sites to prevent re-proposed chains from sidestepping +the AF criteria. +*/ +func TestAFKnownBlock(t *testing.T) { + engine := ethash.NewFaker() db := rawdb.NewMemoryDatabase() genesis := params.DefaultMessNetGenesisBlock() @@ -483,6 +242,30 @@ func TestAFKnownBlock(t *testing.T) { } } +// TestEcbp1100PolynomialV tests the general shape and return values of the ECBP1100 polynomial curve. +// It makes sure domain values above the 'cap' do indeed get limited, as well +// as sanity check some normal domain values. 
+func TestEcbp1100PolynomialV(t *testing.T) { + cases := []struct { + block, ag int64 + }{ + {100, 1}, + {300, 2}, + {500, 5}, + {1000, 16}, + {2000, 31}, + {10000, 31}, + {1e9, 31}, + } + for i, c := range cases { + y := ecbp1100PolynomialV(big.NewInt(c.block * 13)) + y.Div(y, ecbp1100PolynomialVCurveFunctionDenominator) + if c.ag != y.Int64() { + t.Fatal("mismatch", i) + } + } +} + func TestPlot_ecbp1100PolynomialV(t *testing.T) { t.Skip("This test plots a graph of the ECBP1100 polynomial curve.") p, err := plot.New() @@ -514,11 +297,59 @@ func TestPlot_ecbp1100PolynomialV(t *testing.T) { } } -func TestEcbp1100PolynomialV(t *testing.T) { - t.Log( - ecbp1100PolynomialV(big.NewInt(99)), - ecbp1100PolynomialV(big.NewInt(999)), - ecbp1100PolynomialV(big.NewInt(99999))) +func TestEcbp1100AGSinusoidalA(t *testing.T) { + cases := []struct { + in, out float64 + }{ + {0, 1}, + {25132, 31}, + } + tolerance := 0.0000001 + for i, c := range cases { + if got := ecbp1100AGSinusoidalA(c.in); got < c.out-tolerance || got > c.out+tolerance { + t.Fatalf("%d: in: %0.6f want: %0.6f got: %0.6f", i, c.in, c.out, got) + } + } +} + +func TestDifficultyDelta(t *testing.T) { + t.Skip("A development test to play with difficulty steps.") + parent := &types.Header{ + Number: big.NewInt(1_000_000), + Difficulty: params.DefaultMessNetGenesisBlock().Difficulty, + Time: uint64(time.Now().Unix()), + UncleHash: types.EmptyUncleHash, + } + + data := plotter.XYs{} + + for i := uint64(1); i <= 60; i++ { + nextTime := parent.Time + i + d := ethash.CalcDifficulty(params.MessNetConfig, nextTime, parent) + + rat, _ := new(big.Float).Quo( + new(big.Float).SetInt(d), + new(big.Float).SetInt(parent.Difficulty), + ).Float64() + + t.Log(i, rat) + data = append(data, plotter.XY{X: float64(i), Y: rat}) + } + + p, err := plot.New() + if err != nil { + log.Panic(err) + } + p.Title.Text = "Block Difficulty Delta by Timestamp Offset" + p.X.Label.Text = "Timestamp Offset" + p.Y.Label.Text = "Relative Difficulty (child/parent)" + + dataScatter, _ := plotter.NewScatter(data) + p.Add(dataScatter) + + if err := p.Save(800, 600, "difficulty-adjustments.png"); err != nil { + t.Fatal(err) + } } func TestGenerateChainTargetingHashrate(t *testing.T) { @@ -543,18 +374,352 @@ func TestGenerateChainTargetingHashrate(t *testing.T) { if _, err := chain.InsertChain(easy); err != nil { t.Fatal(err) } - firstDifficulty := chain.CurrentHeader().Difficulty - targetDifficultyRatio := big.NewInt(2) - targetDifficulty := new(big.Int).Div(firstDifficulty, targetDifficultyRatio) - for chain.CurrentHeader().Difficulty.Cmp(targetDifficulty) > 0 { + + baseDifficulty := chain.CurrentHeader().Difficulty + targetDifficultyRatio := big.NewInt(4) + targetDifficulty := new(big.Int).Mul(baseDifficulty, targetDifficultyRatio) + + data := plotter.XYs{} + + for chain.CurrentHeader().Difficulty.Cmp(targetDifficulty) < 0 { next, _ := GenerateChain(genesis.Config, chain.CurrentBlock(), engine, db, 1, func(i int, gen *BlockGen) { - gen.OffsetTime(8) // 8: (=10+8=18>(13+4=17).. // minimum value over stable range + gen.OffsetTime(-9) // 8: (=10+8=18>(13+4=17).. 
// minimum value over stable range }) if _, err := chain.InsertChain(next); err != nil { t.Fatal(err) } + + // f, _ := new(big.Float).SetInt(next[0].Difficulty()).Float64() + // data = append(data, plotter.XY{X: float64(next[0].NumberU64()), Y: f}) + + rat1, _ := new(big.Float).Quo( + new(big.Float).SetInt(next[0].Difficulty()), + new(big.Float).SetInt(targetDifficulty), + ).Float64() + + // rat, _ := new(big.Float).Quo( + // new(big.Float).SetInt(next[0].Difficulty()), + // new(big.Float).SetInt(targetDifficultyRatio), + // ).Float64() + + data = append(data, plotter.XY{X: float64(next[0].NumberU64()), Y: rat1}) } t.Log(chain.CurrentBlock().Number()) + + p, err := plot.New() + if err != nil { + log.Panic(err) + } + p.Title.Text = fmt.Sprintf("Block Difficulty Toward Target: %dx", targetDifficultyRatio.Uint64()) + p.X.Label.Text = "Block Number" + p.Y.Label.Text = "Difficulty" + + dataScatter, _ := plotter.NewScatter(data) + p.Add(dataScatter) + + if err := p.Save(800, 600, "difficulty-toward-target.png"); err != nil { + t.Fatal(err) + } +} + +func runMESSTest(t *testing.T, easyL, hardL, caN int, easyT, hardT int64) (hardHead bool, err error) { + // Generate the original common chain segment and the two competing forks + engine := ethash.NewFaker() + + db := rawdb.NewMemoryDatabase() + genesis := params.DefaultMessNetGenesisBlock() + genesisB := MustCommitGenesis(db, genesis) + + chain, err := NewBlockChain(db, nil, genesis.Config, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatal(err) + } + defer chain.Stop() + chain.EnableArtificialFinality(yuckyGlobalTestEnableMess) + + easy, _ := GenerateChain(genesis.Config, genesisB, engine, db, easyL, func(i int, b *BlockGen) { + b.SetNonce(types.EncodeNonce(uint64(rand.Int63n(math.MaxInt64)))) + b.OffsetTime(easyT) + }) + commonAncestor := easy[caN-1] + hard, _ := GenerateChain(genesis.Config, commonAncestor, engine, db, hardL, func(i int, b *BlockGen) { + b.SetNonce(types.EncodeNonce(uint64(rand.Int63n(math.MaxInt64)))) + b.OffsetTime(hardT) + }) + + if _, err := chain.InsertChain(easy); err != nil { + t.Fatal(err) + } + _, err = chain.InsertChain(hard) + hardHead = chain.CurrentBlock().Hash() == hard[len(hard)-1].Hash() + return +} + +var yuckyGlobalTestEnableMess = false + +func TestBlockChain_GenerateMESSPlot(t *testing.T) { + t.Skip("This test plots graph of chain acceptance for visualization.") + easyLen := 500 + maxHardLen := 400 + + generatePlot := func(title, fileName string) { + p, err := plot.New() + if err != nil { + log.Panic(err) + } + p.Title.Text = title + p.X.Label.Text = "Block Depth" + p.Y.Label.Text = "Mode Block Time Offset (10 seconds + y)" + + accepteds := plotter.XYs{} + rejecteds := plotter.XYs{} + sides := plotter.XYs{} + + for i := 1; i <= maxHardLen; i++ { + for j := -9; j <= 8; j++ { + fmt.Println("running", i, j) + hardHead, err := runMESSTest(t, easyLen, i, easyLen-i, 0, int64(j)) + point := plotter.XY{X: float64(i), Y: float64(j)} + if err == nil && hardHead { + accepteds = append(accepteds, point) + } else if err == nil && !hardHead { + sides = append(sides, point) + } else if err != nil { + rejecteds = append(rejecteds, point) + } + + if err != nil { + t.Log(err) + } + } + } + + scatterAccept, _ := plotter.NewScatter(accepteds) + scatterReject, _ := plotter.NewScatter(rejecteds) + scatterSide, _ := plotter.NewScatter(sides) + + pixelWidth := vg.Length(1000) + + scatterAccept.Color = color.RGBA{R: 152, G: 236, B: 161, A: 255} + scatterAccept.Shape = draw.BoxGlyph{} + scatterAccept.Radius = 
vg.Length((float64(pixelWidth) / float64(maxHardLen)) * 2 / 3) + scatterReject.Color = color.RGBA{R: 236, G: 106, B: 94, A: 255} + scatterReject.Shape = draw.BoxGlyph{} + scatterReject.Radius = vg.Length((float64(pixelWidth) / float64(maxHardLen)) * 2 / 3) + scatterSide.Color = color.RGBA{R: 190, G: 197, B: 236, A: 255} + scatterSide.Shape = draw.BoxGlyph{} + scatterSide.Radius = vg.Length((float64(pixelWidth) / float64(maxHardLen)) * 2 / 3) + + p.Add(scatterAccept) + p.Legend.Add("Accepted", scatterAccept) + p.Add(scatterReject) + p.Legend.Add("Rejected", scatterReject) + p.Add(scatterSide) + p.Legend.Add("Sidechained", scatterSide) + + p.Legend.YOffs = -30 + + err = p.Save(pixelWidth, 300, fileName) + if err != nil { + log.Panic(err) + } + } + yuckyGlobalTestEnableMess = true + defer func() { + yuckyGlobalTestEnableMess = false + }() + baseTitle := fmt.Sprintf("Accept/Reject Reorgs: Relative Time (Difficulty) over Proposed Segment Length (%d-block original chain)", easyLen) + generatePlot(baseTitle, "reorgs-MESS.png") + yuckyGlobalTestEnableMess = false + // generatePlot("WITHOUT MESS: "+baseTitle, "reorgs-noMESS.png") +} + +func TestBlockChain_AF_ECBP1100(t *testing.T) { + t.Skip("These have been disused as of the sinusoidal -> cubic change.") + yuckyGlobalTestEnableMess = true + defer func() { + yuckyGlobalTestEnableMess = false + }() + + cases := []struct { + easyLen, hardLen, commonAncestorN int + easyOffset, hardOffset int64 + hardGetsHead, accepted bool + }{ + // INDEX=0 + // Hard has insufficient total difficulty / length and is rejected. + { + 5000, 7500, 2500, + 50, -9, + false, false, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 1000, 7, 995, + 60, 0, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 1000, 7, 995, + 60, 7, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 1000, 1, 999, + 30, 1, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 3, 497, + 0, -8, + true, true, + }, + // INDEX=5 + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 4, 496, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 5, 495, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 6, 494, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 7, 493, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 8, 492, + 0, -9, + true, true, + }, + // INDEX=10 + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 9, 491, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 12, 488, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 20, 480, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 40, 460, + 0, -9, + true, true, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 60, 440, + 0, -9, + true, true, + }, + // // INDEX=15 + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 250, 250, + 0, -9, + false, false, + }, + // Hard has insufficient total difficulty / length and is rejected. 
+ { + 500, 250, 250, + 7, -9, + false, false, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 300, 200, + 13, -9, + false, false, + }, + // Hard has sufficient total difficulty / length and is accepted. + { + 500, 200, 300, + 47, -9, + true, true, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 200, 300, + 47, -8, + false, false, + }, + // // INDEX=20 + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 200, 300, + 17, -8, + false, false, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 200, 300, + 7, -8, + false, false, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 200, 300, + 0, -8, + false, false, + }, + // Hard has insufficient total difficulty / length and is rejected. + { + 500, 100, 400, + 0, -7, + false, false, + }, + // Hard is accepted, but does not have greater total difficulty, + // and is not set as the chain head. + { + 1000, 1, 900, + 60, -9, + false, true, + }, + // INDEX=25 + // Hard is shorter, but sufficiently heavier chain, is accepted. + { + 500, 100, 390, + 60, -9, + true, true, + }, + } + + for i, c := range cases { + hardHead, err := runMESSTest(t, c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset) + if (err != nil && c.accepted) || (err == nil && !c.accepted) || (hardHead != c.hardGetsHead) { + t.Errorf("case=%d [easy=%d hard=%d ca=%d eo=%d ho=%d] want.accepted=%v want.hardHead=%v got.hardHead=%v err=%v", + i, + c.easyLen, c.hardLen, c.commonAncestorN, c.easyOffset, c.hardOffset, + c.accepted, c.hardGetsHead, hardHead, err) + } + } } func TestBlockChain_AF_Difficulty_Develop(t *testing.T) { From 4aceb8621a2c8f6913c7f4fec25b4345bfd5e536 Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 25 Sep 2020 06:23:23 -0500 Subject: [PATCH 101/105] core: update comment to use current instead of proposed Signed-off-by: meows --- core/blockchain_af.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/blockchain_af.go b/core/blockchain_af.go index abff862204..47466e095a 100644 --- a/core/blockchain_af.go +++ b/core/blockchain_af.go @@ -129,7 +129,7 @@ def get_curve_function_numerator(time_delta: int) -> int: The if tdRatio < antiGravity check would then be -if proposed_subchain_td * CURVE_FUNCTION_DENOMINATOR < get_curve_function_numerator(proposed.Time - commonAncestor.Time) * local_subchain_td. +if proposed_subchain_td * CURVE_FUNCTION_DENOMINATOR < get_curve_function_numerator(current.Time - commonAncestor.Time) * local_subchain_td. 
*/ func ecbp1100PolynomialV(x *big.Int) *big.Int { From b8bcdb8dc645173f79e23ca0b6cfbdc3296d12f4 Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 25 Sep 2020 06:26:23 -0500 Subject: [PATCH 102/105] eth: (lint) fix unecesssary use of Sprintf (gosimple) Signed-off-by: meows --- eth/sync_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/eth/sync_test.go b/eth/sync_test.go index 0e699ce78f..1dcb838307 100644 --- a/eth/sync_test.go +++ b/eth/sync_test.go @@ -17,7 +17,6 @@ package eth import ( - "fmt" "sync/atomic" "testing" "time" @@ -90,8 +89,8 @@ func TestArtificialFinalityFeatureEnablingDisabling(t *testing.T) { // b.chainSync.forced = true io1, io2 := p2p.MsgPipe() - go a.handle(a.newPeer(65, p2p.NewPeer(enode.ID{}, fmt.Sprintf("peer-b"), nil), io2, a.txpool.Get)) - go b.handle(b.newPeer(65, p2p.NewPeer(enode.ID{}, fmt.Sprintf("peer-a"), nil), io1, b.txpool.Get)) + go a.handle(a.newPeer(65, p2p.NewPeer(enode.ID{}, "peer-b", nil), io2, a.txpool.Get)) + go b.handle(b.newPeer(65, p2p.NewPeer(enode.ID{}, "peer-a", nil), io1, b.txpool.Get)) time.Sleep(250 * time.Millisecond) op := peerToSyncOp(downloader.FullSync, b.peers.BestPeer()) From 41ee136f2463ba97dd7016059fc09782a5ccb4c2 Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 25 Sep 2020 06:26:47 -0500 Subject: [PATCH 103/105] goethereum: (lint) fix struct tag on nonexported field (govet) Signed-off-by: meows --- params/types/goethereum/goethereum.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/types/goethereum/goethereum.go b/params/types/goethereum/goethereum.go index 7b3af6fc4e..5e6308df59 100644 --- a/params/types/goethereum/goethereum.go +++ b/params/types/goethereum/goethereum.go @@ -72,7 +72,7 @@ type ChainConfig struct { ECIP1080Transition *big.Int `json:"-"` // Cache types for use with testing, but will not show up in config API. - ecbp1100Transition *big.Int `json:"-"` + ecbp1100Transition *big.Int } // String implements the fmt.Stringer interface. From 2f81f3230faf7c211f2b0fd623f134d9be3bc2ab Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 25 Sep 2020 07:20:49 -0500 Subject: [PATCH 104/105] core: fix log value for proposed reorg span Signed-off-by: meows --- core/blockchain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/blockchain.go b/core/blockchain.go index 1ec95526ea..d27d89f92a 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1571,7 +1571,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. 
"status", "accepted", "age", common.PrettyAge(time.Unix(int64(d.commonBlock.Time()), 0)), "current.span", common.PrettyDuration(time.Duration(currentBlock.Time()-d.commonBlock.Time())*time.Second), - "proposed.span", common.PrettyDuration(time.Duration(int32(block.Time()))*time.Second), + "proposed.span", common.PrettyDuration(time.Duration(block.Time()-d.commonBlock.Time())*time.Second), "common.bno", d.commonBlock.Number().Uint64(), "common.hash", d.commonBlock.Hash(), "current.bno", currentBlock.Number().Uint64(), "current.hash", currentBlock.Hash(), "proposed.bno", block.Number().Uint64(), "proposed.hash", block.Hash(), From fbe85f3cfa6800fcae7b29770482b65054c7d792 Mon Sep 17 00:00:00 2001 From: meows Date: Fri, 25 Sep 2020 13:11:09 -0500 Subject: [PATCH 105/105] params: update MESS block numbers for etc mainnet, mordor testnet Signed-off-by: meows --- params/config_classic.go | 2 +- params/config_mordor.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/params/config_classic.go b/params/config_classic.go index 30a271a909..dfaa98f128 100644 --- a/params/config_classic.go +++ b/params/config_classic.go @@ -75,7 +75,7 @@ var ( ECIP1017EraRounds: big.NewInt(5000000), ECIP1010PauseBlock: big.NewInt(3000000), ECIP1010Length: big.NewInt(2000000), - ECBP1100FBlock: big.NewInt(11_317_400), // ETA 1 October 2020 + ECBP1100FBlock: nil, // TODO@ethereumclassic/ECIPS RequireBlockHashes: map[uint64]common.Hash{ 1920000: common.HexToHash("0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f"), 2500000: common.HexToHash("0xca12c63534f565899681965528d536c52cb05b7c48e269c2a6cb77ad864d878a"), diff --git a/params/config_mordor.go b/params/config_mordor.go index 1198de55ac..4315d558c1 100644 --- a/params/config_mordor.go +++ b/params/config_mordor.go @@ -71,7 +71,7 @@ var ( ECIP1017EraRounds: big.NewInt(2000000), ECIP1010PauseBlock: nil, ECIP1010Length: nil, - ECBP1100FBlock: big.NewInt(2290740), // ETA 15 Sept 2020, ~1500 UTC + ECBP1100FBlock: big.NewInt(2380000), // ETA 29 Sept 2020, ~1500 UTC RequireBlockHashes: map[uint64]common.Hash{ 840013: common.HexToHash("0x2ceada2b191879b71a5bcf2241dd9bc50d6d953f1640e62f9c2cee941dc61c9d"), 840014: common.HexToHash("0x8ec29dd692c8985b82410817bac232fc82805b746538d17bc924624fe74a0fcf"),