This repository was archived by the owner on Feb 17, 2025. It is now read-only.

Can the zkevm-node APIs "TraceBlockByNumber" and "TraceBlockByHash" be optimized to avoid wasted execution time? #3733

@lyh169

Description

Here is the current implementation in zkevm-node:
func (d *DebugEndpoints) TraceBlockByHash(hash types.ArgHash, cfg *traceConfig) (interface{}, types.Error) {
	return d.txMan.NewDbTxScope(d.state, func(ctx context.Context, dbTx pgx.Tx) (interface{}, types.Error) {
		block, err := d.state.GetL2BlockByHash(ctx, hash.Hash(), dbTx)
		if errors.Is(err, state.ErrNotFound) {
			return nil, types.NewRPCError(types.DefaultErrorCode, fmt.Sprintf("block %s not found", hash.Hash().String()))
		} else if err != nil {
			return RPCErrorResponse(types.DefaultErrorCode, "failed to get block by hash", err, true)
		}

		traces, rpcErr := d.buildTraceBlock(ctx, block.Transactions(), cfg, dbTx)
		if rpcErr != nil {
			return nil, rpcErr
		}

		return traces, nil
	})
}
func (d *DebugEndpoints) buildTraceBlock(ctx context.Context, txs []*ethTypes.Transaction, cfg *traceConfig, dbTx pgx.Tx) (interface{}, types.Error) {
	traces := []traceBlockTransactionResponse{}
	for _, tx := range txs {
		traceTransaction, err := d.buildTraceTransaction(ctx, tx.Hash(), cfg, dbTx)
		if err != nil {
			errMsg := fmt.Sprintf("failed to get trace for transaction %v: %v", tx.Hash().String(), err.Error())
			return RPCErrorResponse(types.DefaultErrorCode, errMsg, err, true)
		}
		traceBlockTransaction := traceBlockTransactionResponse{
			Result: traceTransaction,
		}
		traces = append(traces, traceBlockTransaction)
	}

	return traces, nil
}

buildTraceBlock builds the trace for each transaction one by one, but buildTraceTransaction has to re-execute every transaction in the block from the first one up to the requested index, so tracing a whole block repeats a lot of executor work, as the following excerpt shows:

	for _, tx := range l2Block.Transactions() {
		checkReceipt, err := s.GetTransactionReceipt(ctx, tx.Hash(), dbTx)
		if err != nil {
			return nil, err
		}
		if checkReceipt.TransactionIndex < receipt.TransactionIndex {
			count++
		}
	}

	// since the executor only stores the state roots by block, we need to
	// execute all the txs in the block until the tx we want to trace
	var txsToEncode []types.Transaction
	var effectivePercentage []uint8
	for i := 0; i <= count; i++ {
		txsToEncode = append(txsToEncode, *l2Block.Transactions()[i])
		effectivePercentage = append(effectivePercentage, MaxEffectivePercentage)
		log.Debugf("trace will reprocess tx: %v", l2Block.Transactions()[i].Hash().String())
	}
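
For a block of n transactions this replay is quadratic: tracing transaction i executes transactions 0..i, so the executor runs n(n+1)/2 transactions in total instead of n. A quick back-of-the-envelope comparison (these are executor-call counts, not measured timings):

package main

import "fmt"

func main() {
	// Per-tx replay: tracing tx i re-executes txs 0..i, so a block of n
	// transactions costs n*(n+1)/2 executions in total. A geth-style
	// single pass costs only n.
	for _, n := range []int{10, 100, 1000} {
		replay := n * (n + 1) / 2
		fmt.Printf("n=%4d  per-tx replay: %7d executions  single pass: %4d\n", n, replay, n)
	}
}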

By contrast, go-ethereum's traceBlock only needs to execute all the transactions of the block once:

func (api *API) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) {
	if block.NumberU64() == 0 {
		return nil, errors.New("genesis is not traceable")
	}
	// Prepare base state
	parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())
	if err != nil {
		return nil, err
	}
	reexec := defaultTraceReexec
	if config != nil && config.Reexec != nil {
		reexec = *config.Reexec
	}
	statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
	if err != nil {
		return nil, err
	}
	defer release()
	// JS tracers have high overhead. In this case run a parallel
	// process that generates states in one thread and traces txes
	// in separate worker threads.
	if config != nil && config.Tracer != nil && *config.Tracer != "" {
		if isJS := DefaultDirectory.IsJS(*config.Tracer); isJS {
			return api.traceBlockParallel(ctx, block, statedb, config)
		}
	}
	// Native tracers have low overhead
	var (
		txs       = block.Transactions()
		blockHash = block.Hash()
		blockCtx  = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
		signer    = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time())
		results   = make([]*txTraceResult, len(txs))
	)
	if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
		vmenv := vm.NewEVM(blockCtx, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{})
		core.ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
	}
	for i, tx := range txs {
		// Generate the next state snapshot fast without tracing
		msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
		txctx := &Context{
			BlockHash:   blockHash,
			BlockNumber: block.Number(),
			TxIndex:     i,
			TxHash:      tx.Hash(),
		}
		res, err := api.traceTx(ctx, tx, msg, txctx, blockCtx, statedb, config)
		if err != nil {
			return nil, err
		}
		results[i] = &txTraceResult{TxHash: tx.Hash(), Result: res}
	}
	return results, nil
}
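
If the executor could return (or be fed) the intermediate state after each transaction, buildTraceBlock could trace the whole block in one pass, the same way go-ethereum does. A minimal sketch of that shape, with hypothetical types and an invented ApplyAndTrace helper (zkevm-node's executor does not expose this API; all names below are only illustrative):

package main

import "fmt"

// Hypothetical stand-ins: zkevm-node does not expose these names; they only
// model "apply one tx on top of the previous state and get its trace back".
type Tx struct{ Hash string }

type State struct{ Root string }

type Executor interface {
	// ApplyAndTrace executes tx on top of st and returns the trace for tx
	// plus the resulting state, so the next tx does not replay the prefix.
	ApplyAndTrace(st State, tx Tx) (trace string, next State, err error)
}

// traceBlockSinglePass walks the block once, threading the intermediate
// state through, the same shape as go-ethereum's traceBlock loop.
func traceBlockSinglePass(ex Executor, start State, txs []Tx) ([]string, error) {
	traces := make([]string, 0, len(txs))
	st := start
	for _, tx := range txs {
		trace, next, err := ex.ApplyAndTrace(st, tx)
		if err != nil {
			return nil, fmt.Errorf("failed to trace tx %s: %w", tx.Hash, err)
		}
		traces = append(traces, trace)
		st = next // reuse the post-tx state instead of re-executing txs 0..i-1
	}
	return traces, nil
}

// fakeExecutor is a toy implementation just to make the sketch runnable.
type fakeExecutor struct{}

func (fakeExecutor) ApplyAndTrace(st State, tx Tx) (string, State, error) {
	return "trace(" + tx.Hash + ")", State{Root: st.Root + "+" + tx.Hash}, nil
}

func main() {
	txs := []Tx{{Hash: "0xa"}, {Hash: "0xb"}, {Hash: "0xc"}}
	traces, err := traceBlockSinglePass(fakeExecutor{}, State{Root: "genesis"}, txs)
	if err != nil {
		panic(err)
	}
	fmt.Println(traces) // [trace(0xa) trace(0xb) trace(0xc)]
}

Whether this is feasible depends on the executor being able to expose intermediate state between transactions; the quoted comment above ("the executor only stores the state roots by block") suggests that is the current limitation.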

Does zkevm-node have an optimization strategy for this issue? Thanks.
