add triedb busy error #494

Draft · wants to merge 1 commit into base: master
9 changes: 6 additions & 3 deletions arbitrum/apibackend.go
@@ -532,7 +532,7 @@ func (a *APIBackend) NewMatcherBackend() filtermaps.MatcherBackend {
return a.b.filterMaps.NewMatcherBackend()
}

func StateAndHeaderFromHeader(ctx context.Context, chainDb ethdb.Database, bc *core.BlockChain, maxRecreateStateDepth int64, header *types.Header, err error, archiveClientsManager *archiveFallbackClientsManager) (*state.StateDB, *types.Header, error) {
func StateAndHeaderFromHeader(ctx context.Context, chainDb ethdb.Database, bc *core.BlockChain, maxRecreateStateDepth int64, errorWhenTriedbBusy bool, header *types.Header, err error, archiveClientsManager *archiveFallbackClientsManager) (*state.StateDB, *types.Header, error) {
if err != nil {
return nil, header, err
}
@@ -545,6 +545,9 @@ func StateAndHeaderFromHeader(ctx context.Context, chainDb ethdb.Database, bc *c
if archiveClientsManager != nil && header.Number.Uint64() <= archiveClientsManager.lastAvailableBlock() {
return nil, header, &types.ErrUseArchiveFallback{BlockNum: header.Number.Uint64()}
}
if errorWhenTriedbBusy && bc.StateCache().TrieDB().IsBusyCommitting() {
return nil, header, errors.New("please retry, triedb is busy committing state")
}
stateFor := func(db state.Database, snapshots *snapshot.Tree) func(header *types.Header) (*state.StateDB, StateReleaseFunc, error) {
return func(header *types.Header) (*state.StateDB, StateReleaseFunc, error) {
if header.Root != (common.Hash{}) {
@@ -618,7 +621,7 @@ func StateAndHeaderFromHeader(ctx context.Context, chainDb ethdb.Database, bc *c

func (a *APIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) {
header, err := a.HeaderByNumber(ctx, number)
return StateAndHeaderFromHeader(ctx, a.ChainDb(), a.b.arb.BlockChain(), a.b.config.MaxRecreateStateDepth, header, err, a.archiveClientsManager)
return StateAndHeaderFromHeader(ctx, a.ChainDb(), a.b.arb.BlockChain(), a.b.config.MaxRecreateStateDepth, a.b.config.ErrorWhenTriedbBusy, header, err, a.archiveClientsManager)
}

func (a *APIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) {
@@ -629,7 +632,7 @@ func (a *APIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOr
if ishash && header != nil && header.Number.Cmp(bc.CurrentBlock().Number) > 0 && bc.GetCanonicalHash(header.Number.Uint64()) != hash {
return nil, nil, errors.New("requested block ahead of current block and the hash is not currently canonical")
}
return StateAndHeaderFromHeader(ctx, a.ChainDb(), a.b.arb.BlockChain(), a.b.config.MaxRecreateStateDepth, header, err, a.archiveClientsManager)
return StateAndHeaderFromHeader(ctx, a.ChainDb(), a.b.arb.BlockChain(), a.b.config.MaxRecreateStateDepth, a.b.config.ErrorWhenTriedbBusy, header, err, a.archiveClientsManager)
}

func (a *APIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) {
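The check added above makes state-dependent RPC calls fail fast with a retryable error instead of blocking on the triedb lock. Below is a minimal caller-side sketch of handling that error; the endpoint URL, the eth_getBalance call, and the backoff values are illustrative assumptions, not part of this change.

```go
// Hypothetical client-side retry loop for the new busy error.
// The endpoint, method, and backoff values are assumptions for illustration only.
package main

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/rpc"
)

func callWithRetry(ctx context.Context, client *rpc.Client, result interface{}, method string, args ...interface{}) error {
	for attempt := 1; attempt <= 5; attempt++ {
		err := client.CallContext(ctx, result, method, args...)
		if err == nil {
			return nil
		}
		// Matches the message returned when error-when-triedb-busy is enabled
		// and a hashdb commit currently holds the main lock.
		if !strings.Contains(err.Error(), "triedb is busy committing state") {
			return err
		}
		time.Sleep(time.Duration(attempt) * 200 * time.Millisecond) // simple linear backoff
	}
	return fmt.Errorf("state still unavailable after retries: %s", method)
}

func main() {
	client, err := rpc.Dial("http://localhost:8547") // assumed local node endpoint
	if err != nil {
		panic(err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	var balance string
	if err := callWithRetry(ctx, client, &balance, "eth_getBalance", "0x0000000000000000000000000000000000000000", "latest"); err != nil {
		panic(err)
	}
	fmt.Println("balance:", balance)
}
```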
3 changes: 3 additions & 0 deletions arbitrum/config.go
@@ -44,6 +44,7 @@ type Config struct {
ClassicRedirect string `koanf:"classic-redirect"`
ClassicRedirectTimeout time.Duration `koanf:"classic-redirect-timeout"`
MaxRecreateStateDepth int64 `koanf:"max-recreate-state-depth"`
ErrorWhenTriedbBusy bool `koanf:"error-when-triedb-busy"`

AllowMethod []string `koanf:"allow-method"`

@@ -89,6 +90,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) {
f.Int(prefix+".filter-log-cache-size", DefaultConfig.FilterLogCacheSize, "log filter system maximum number of cached blocks")
f.Duration(prefix+".filter-timeout", DefaultConfig.FilterTimeout, "log filter system maximum time filters stay active")
f.Int64(prefix+".max-recreate-state-depth", DefaultConfig.MaxRecreateStateDepth, "maximum depth for recreating state, measured in l2 gas (0=don't recreate state, -1=infinite, -2=use default value for archive or non-archive node (whichever is configured))")
f.Bool(prefix+".error-when-triedb-busy", DefaultConfig.ErrorWhenTriedbBusy, "if enabled, rpc calls requiring state access will return an error when triedb is busy committing state; otherwise the request will wait and might time out")
f.StringSlice(prefix+".allow-method", DefaultConfig.AllowMethod, "list of whitelisted rpc methods")
arbDebug := DefaultConfig.ArbDebug
f.Uint64(prefix+".arbdebug.block-range-bound", arbDebug.BlockRangeBound, "bounds the number of blocks arbdebug calls may return")
@@ -114,6 +116,7 @@ var DefaultConfig = Config{
FeeHistoryMaxBlockCount: 1024,
ClassicRedirect: "",
MaxRecreateStateDepth: UninitializedMaxRecreateStateDepth, // default value should be set depending on node type (archive / non-archive)
ErrorWhenTriedbBusy: false,
AllowMethod: []string{},
ArbDebug: ArbDebugConfig{
BlockRangeBound: 256,
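A minimal sketch of enabling the new option programmatically is below; the import path is an assumption based on the file location (arbitrum/config.go), and the CLI flag form would be <prefix>.error-when-triedb-busy, with the prefix depending on where ConfigAddOptions is wired up.

```go
// Sketch: turning the new option on in code. The import path is an
// assumption inferred from the file location arbitrum/config.go.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/arbitrum"
)

func main() {
	cfg := arbitrum.DefaultConfig // value copy of the package default (ErrorWhenTriedbBusy: false)
	cfg.ErrorWhenTriedbBusy = true
	fmt.Printf("error-when-triedb-busy: %v\n", cfg.ErrorWhenTriedbBusy)
}
```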
10 changes: 10 additions & 0 deletions triedb/database.go
@@ -332,3 +332,13 @@ func (db *Database) IsVerkle() bool {
func (db *Database) Disk() ethdb.Database {
return db.disk
}

// IsBusyCommitting reports whether the backend is busy committing and holding the main db lock for an extended time.
// HashScheme: true while a commit is in progress. PathScheme: always false (the check is not needed there).
func (db *Database) IsBusyCommitting() bool {
hdb, ok := db.backend.(*hashdb.Database)
if !ok {
return false
}
return hdb.IsBusyCommitting()
}
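The wrapper above dispatches on the concrete backend type: only the hash-scheme backend can report a long-running commit, and any other backend (path scheme included) reports false. The self-contained sketch below mirrors that pattern with simplified stand-in types rather than the real triedb ones.

```go
// Simplified stand-ins for the triedb backend dispatch; not the real types.
package main

import "fmt"

type backend interface {
	Close() error
}

type hashBackend struct{ busy bool }

func (h *hashBackend) Close() error           { return nil }
func (h *hashBackend) IsBusyCommitting() bool { return h.busy }

type pathBackend struct{}

func (p *pathBackend) Close() error { return nil }

type database struct{ backend backend }

// IsBusyCommitting mirrors the wrapper above: only the hash-scheme backend
// can report a long-running commit; every other backend reports false.
func (db *database) IsBusyCommitting() bool {
	hdb, ok := db.backend.(*hashBackend)
	if !ok {
		return false
	}
	return hdb.IsBusyCommitting()
}

func main() {
	fmt.Println((&database{backend: &hashBackend{busy: true}}).IsBusyCommitting()) // true
	fmt.Println((&database{backend: &pathBackend{}}).IsBusyCommitting())           // false
}
```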
13 changes: 13 additions & 0 deletions triedb/hashdb/database.go
@@ -21,6 +21,7 @@ import (
"fmt"
"reflect"
"sync"
"sync/atomic"
"time"

"github.com/VictoriaMetrics/fastcache"
@@ -78,6 +79,9 @@ var Defaults = &Config{
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
type Database struct {
// Arbitrum:
committing atomic.Bool

diskdb ethdb.Database // Persistent storage for matured trie nodes
cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs
dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
@@ -410,6 +414,9 @@ func (db *Database) Commit(node common.Hash, report bool) error {
db.lock.Lock()
defer db.lock.Unlock()

db.committing.Store(true)
defer db.committing.Store(false)

// Create a database batch to flush persistent data out. It is important that
// outside code doesn't see an inconsistent state (referenced data removed from
// memory cache during commit but not yet in persistent storage). This is ensured
@@ -644,3 +651,9 @@ func (reader *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]
func (db *Database) StateReader(root common.Hash) (database.StateReader, error) {
return nil, errors.New("not implemented")
}

// IsBusyCommitting returns true if the hashdb is currently performing a commit.
// Thread safe: it can be called concurrently, but the result may already be stale by the time the caller acts on it.
func (db *Database) IsBusyCommitting() bool {
return db.committing.Load()
}
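The Commit change above publishes the busy state through an atomic flag, so readers such as IsBusyCommitting can poll it without contending on db.lock. A standalone sketch of that pattern, with made-up names, is below.

```go
// Toy illustration of guarding a long, lock-holding operation with an
// atomic.Bool so other goroutines can observe busyness without blocking.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// store is a stand-in for hashdb.Database: a mutex-protected structure whose
// long "commit" is flagged by an atomic.Bool that readers can poll lock-free.
type store struct {
	lock       sync.RWMutex
	committing atomic.Bool
	data       map[string]string
}

func (s *store) Commit() {
	s.lock.Lock()
	defer s.lock.Unlock()

	// Set the flag for the duration of the critical section, mirroring
	// db.committing.Store(true) / defer db.committing.Store(false) above.
	s.committing.Store(true)
	defer s.committing.Store(false)

	time.Sleep(200 * time.Millisecond) // simulate a long flush to disk
}

// IsBusyCommitting never touches the mutex, so it returns immediately even
// while Commit holds the write lock. The result can be stale the moment it
// is returned, which matches the semantics documented above.
func (s *store) IsBusyCommitting() bool {
	return s.committing.Load()
}

func main() {
	s := &store{data: map[string]string{}}
	go s.Commit()
	time.Sleep(50 * time.Millisecond)
	fmt.Println("busy:", s.IsBusyCommitting()) // very likely true while Commit is sleeping
	time.Sleep(300 * time.Millisecond)
	fmt.Println("busy:", s.IsBusyCommitting()) // false after Commit returns
}
```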