Commit
feat(op-node): add l1 cache size config
welkin22 committed Jul 31, 2024
1 parent cf5a885 commit 95c934f
Showing 5 changed files with 27 additions and 2 deletions.
2 changes: 1 addition & 1 deletion op-chain-ops/cmd/check-derivation/main.go
@@ -114,7 +114,7 @@ func newClientsFromContext(cliCtx *cli.Context) (*ethclient.Client, *sources.Eth
MethodResetDuration: time.Minute,
}
cl := ethclient.NewClient(clients.L2RpcClient)
ethCl, err := sources.NewEthClient(client.NewBaseRPCClient(clients.L2RpcClient), log.Root(), nil, &ethClCfg)
ethCl, err := sources.NewEthClient(client.NewBaseRPCClient(clients.L2RpcClient), log.Root(), nil, &ethClCfg, false)
if err != nil {
return nil, nil, err
}
8 changes: 8 additions & 0 deletions op-node/flags/flags.go
@@ -180,6 +180,13 @@ var (
Value: 20,
Category: L1RPCCategory,
}
L1RPCMaxCacheSize = &cli.IntFlag{
Name: "l1.rpc-max-cache-size",
Usage: "The maximum cache size of the L1 client. it should be greater than or equal to the max lag of unsafe and safe block heights. Must be greater than or equal to 1",
EnvVars: prefixEnvVars("L1_RPC_MAX_CACHE_SIZE"),
Value: 1000,
Category: L1RPCCategory,
}
L1HTTPPollInterval = &cli.DurationFlag{
Name: "l1.http-poll-interval",
Usage: "Polling interval for latest-block subscription when using an HTTP RPC provider. Ignored for other types of RPC endpoints.",
@@ -417,6 +424,7 @@ var optionalFlags = []cli.Flag{
L1RPCProviderKind,
L1RPCRateLimit,
L1RPCMaxBatchSize,
L1RPCMaxCacheSize,
L1RPCMaxConcurrency,
L1HTTPPollInterval,
L1ArchiveBlobRpcAddr,
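For reference, a minimal, self-contained sketch (not part of this commit) of how an urfave/cli integer flag like l1.rpc-max-cache-size is read and validated. The standalone app and the exact env var name are illustrative assumptions; op-node itself registers the flag in op-node/flags/flags.go and reads it in op-node/service.go, as the later hunks show.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	// Hypothetical standalone flag mirroring the new op-node flag.
	l1CacheFlag := &cli.IntFlag{
		Name:    "l1.rpc-max-cache-size",
		Usage:   "The maximum cache size of the L1 client",
		EnvVars: []string{"OP_NODE_L1_RPC_MAX_CACHE_SIZE"},
		Value:   1000,
	}
	app := &cli.App{
		Name:  "example",
		Flags: []cli.Flag{l1CacheFlag},
		Action: func(ctx *cli.Context) error {
			size := ctx.Int(l1CacheFlag.Name)
			// Same lower bound the commit enforces in L1EndpointConfig.Check().
			if size < 1 {
				return fmt.Errorf("cache size is invalid or unreasonable: %d", size)
			}
			fmt.Println("l1 cache size:", size)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}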
14 changes: 14 additions & 0 deletions op-node/node/client.go
@@ -117,6 +117,9 @@ type L1EndpointConfig struct {
// BatchSize specifies the maximum batch-size, which also applies as L1 rate-limit burst amount (if set).
BatchSize int

// CacheSize specifies the maximum cache size of the L1 client. It should be greater than or equal to the maximum lag between the unsafe and safe block heights.
CacheSize int

// MaxConcurrency specifies the maximum number of concurrent requests to the L1 RPC.
MaxConcurrency int

@@ -133,6 +136,9 @@ func (cfg *L1EndpointConfig) Check() error {
if cfg.BatchSize < 1 || cfg.BatchSize > 500 {
return fmt.Errorf("batch size is invalid or unreasonable: %d", cfg.BatchSize)
}
if cfg.CacheSize < 1 {
return fmt.Errorf("cache size is invalid or unreasonable: %d", cfg.CacheSize)
}
if cfg.RateLimit < 0 {
return fmt.Errorf("rate limit cannot be negative")
}
@@ -163,6 +169,10 @@ func (cfg *L1EndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCf
rpcCfg := sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind)
rpcCfg.MaxRequestsPerBatch = cfg.BatchSize
rpcCfg.MaxConcurrentRequests = cfg.MaxConcurrency
rpcCfg.ReceiptsCacheSize = cfg.CacheSize
rpcCfg.HeadersCacheSize = cfg.CacheSize
rpcCfg.TransactionsCacheSize = cfg.CacheSize
rpcCfg.PayloadsCacheSize = cfg.CacheSize
return l1Node, rpcCfg, nil
}

@@ -177,6 +187,10 @@ func fallbackClientWrap(ctx context.Context, logger log.Logger, urlList []string
rpcCfg := sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind)
rpcCfg.MaxRequestsPerBatch = cfg.BatchSize
rpcCfg.MaxConcurrentRequests = cfg.MaxConcurrency
rpcCfg.ReceiptsCacheSize = cfg.CacheSize
rpcCfg.HeadersCacheSize = cfg.CacheSize
rpcCfg.TransactionsCacheSize = cfg.CacheSize
rpcCfg.PayloadsCacheSize = cfg.CacheSize
return l1Node, rpcCfg, nil
}

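To make the sizing guidance concrete (the cache should cover at least the gap between the unsafe and safe L1 heads, and the config check rejects anything below 1), here is a small hypothetical helper. It only illustrates the rule stated in the new comment and Check(); it is not code from this commit.

package main

import "fmt"

// recommendedL1CacheSize illustrates the sizing rule from this commit: the
// cache should hold at least the lag between the unsafe and safe L1 block
// heights, and must never be smaller than 1.
func recommendedL1CacheSize(unsafeHead, safeHead uint64) int {
	lag := int(unsafeHead - safeHead)
	if lag < 1 {
		lag = 1
	}
	return lag
}

func main() {
	// e.g. unsafe head at 20_500 and safe head at 19_800 -> at least 700
	fmt.Println(recommendedL1CacheSize(20_500, 19_800))
}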
1 change: 1 addition & 0 deletions op-node/service.go
@@ -158,6 +158,7 @@ func NewL1EndpointConfig(ctx *cli.Context) *node.L1EndpointConfig {
L1RPCKind: sources.RPCProviderKind(strings.ToLower(ctx.String(flags.L1RPCProviderKind.Name))),
RateLimit: ctx.Float64(flags.L1RPCRateLimit.Name),
BatchSize: ctx.Int(flags.L1RPCMaxBatchSize.Name),
CacheSize: ctx.Int(flags.L1RPCMaxCacheSize.Name),
HttpPollInterval: ctx.Duration(flags.L1HTTPPollInterval.Name),
MaxConcurrency: ctx.Int(flags.L1RPCMaxConcurrency.Name),
}
4 changes: 3 additions & 1 deletion op-service/sources/l1_client.go
@@ -206,7 +206,9 @@ func (s *L1Client) GoOrUpdatePreFetchReceipts(ctx context.Context, l1Start uint6
continue
}
if !isSuccess {
s.log.Debug("pre fetch receipts fail without error,need retry", "blockHash", blockInfo.Hash, "blockNumber", blockNumber)
s.log.Debug("The receipts cache may be full. "+
"please ensure the difference between the safe block height and the unsafe block height is less than or equal to the cache size.",
"blockHash", blockInfo.Hash, "blockNumber", blockNumber)
time.Sleep(1 * time.Second)
continue
}
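The new log message sits in a retry path: when pre-fetching a block's receipts returns no error but reports failure (which, per the message, may mean the receipts cache is full), the code logs, sleeps one second, and continues. A simplified, hypothetical sketch of that loop shape follows; the fetch function is a stand-in, not the real L1Client internals.

package main

import (
	"fmt"
	"time"
)

// preFetchReceipts stands in for the real pre-fetch call; it reports whether
// the block's receipts made it into the cache. Here it pretends the cache is
// full for the first two attempts.
func preFetchReceipts(attempt int) (bool, error) {
	return attempt >= 2, nil
}

func main() {
	blockNumber := uint64(101)
	for attempt := 0; ; attempt++ {
		isSuccess, err := preFetchReceipts(attempt)
		if err != nil {
			fmt.Println("pre fetch receipts failed:", err)
			time.Sleep(1 * time.Second)
			continue
		}
		if !isSuccess {
			// Mirrors the new log message: the cache may be full, so wait and
			// retry instead of giving up on the block.
			fmt.Println("receipts cache may be full, retrying block", blockNumber)
			time.Sleep(1 * time.Second)
			continue
		}
		fmt.Println("receipts cached for block", blockNumber)
		break
	}
}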
