diff --git a/op-node/cmd/main.go b/op-node/cmd/main.go index deacab727fb4c..f677985eea9b2 100644 --- a/op-node/cmd/main.go +++ b/op-node/cmd/main.go @@ -87,6 +87,8 @@ func RollupNodeMain(ctx *cli.Context, closeApp context.CancelCauseFunc) (cliapp. } cfg.Cancel = closeApp + cfg.HemitrapEnabled = ctx.Bool(flags.HemitrapEnabled.Name) + // Only pretty-print the banner if it is a terminal log. Otherwise log it as key-value pairs. if logCfg.Format == "terminal" { log.Info("rollup config:\n" + cfg.Rollup.Description(chaincfg.L2ChainIDToNetworkDisplayName)) diff --git a/op-node/config/config.go b/op-node/config/config.go index fb53f31f28104..cff0cd51677cb 100644 --- a/op-node/config/config.go +++ b/op-node/config/config.go @@ -93,6 +93,8 @@ type Config struct { // Experimental. Enables new opstack RPC namespace. Used by op-test-sequencer. ExperimentalOPStackAPI bool + + HemitrapEnabled bool } // ConductorRPCFunc retrieves the endpoint. The RPC may not immediately be available. diff --git a/op-node/flags/flags.go b/op-node/flags/flags.go index 522f4e7fa6b4f..a5d29e20846cb 100644 --- a/op-node/flags/flags.go +++ b/op-node/flags/flags.go @@ -79,6 +79,12 @@ var ( Category: RollupCategory, } /* Optional Flags */ + HemitrapEnabled = &cli.BoolFlag{ + Name: "hemitrap.enabled", + Usage: "skips some validation checks during sequencing for use with hemitrap", + Required: false, + EnvVars: prefixEnvVars("HEMITRAP_ENABLED"), + } BeaconHeader = &cli.StringFlag{ Name: "l1.beacon-header", Usage: "Optional HTTP header to add to all requests to the L1 Beacon endpoint. 
Format: 'X-Key: Value'", @@ -507,6 +513,7 @@ var optionalFlags = []cli.Flag{ InteropDependencySet, IgnoreMissingPectraBlobSchedule, ExperimentalOPStackAPI, + HemitrapEnabled, } var DeprecatedFlags = []cli.Flag{ diff --git a/op-node/node/node.go b/op-node/node/node.go index 4ebe271566ec1..6d688ad06db90 100644 --- a/op-node/node/node.go +++ b/op-node/node/node.go @@ -395,7 +395,7 @@ func initL1Handlers(cfg *config.Config, node *OpNode) (ethereum.Subscription, et // note: this function relies on side effects to set node.runCfg func initRuntimeConfig(ctx context.Context, cfg *config.Config, node *OpNode) error { // attempt to load runtime config, repeat N times - runCfg := runcfg.NewRuntimeConfig(node.log, node.l1Source, &cfg.Rollup) + runCfg := runcfg.NewRuntimeConfig(node.log, node.l1Source, &cfg.Rollup, cfg.HemitrapEnabled) // Set node.runCfg early so handleProtocolVersionsUpdate can access it during initialization node.runCfg = runCfg @@ -607,7 +607,7 @@ func initL2(ctx context.Context, cfg *config.Config, node *OpNode) (*sources.Eng } l2Driver := driver.NewDriver(node.eventSys, node.eventDrain, &cfg.Driver, &cfg.Rollup, cfg.L1ChainConfig, cfg.DependencySet, l2Source, node.l1Source, upstreamFollowSource, - node.beacon, node, node, node.log, node.metrics, cfg.ConfigPersistence, safeDB, &cfg.Sync, sequencerConductor, altDA, indexingMode) + node.beacon, node, node, node.log, node.metrics, cfg.ConfigPersistence, safeDB, &cfg.Sync, sequencerConductor, altDA, indexingMode, cfg.HemitrapEnabled) // Wire up IndexingMode to engine controller for direct procedure 
call if sys != nil { diff --git a/op-node/node/runcfg/runtime_config.go b/op-node/node/runcfg/runtime_config.go index 655a229b9ccd8..ecd38acb1384b 100644 --- a/op-node/node/runcfg/runtime_config.go +++ b/op-node/node/runcfg/runtime_config.go @@ -55,6 +55,8 @@ type RuntimeConfig struct { l1Ref eth.L1BlockRef runtimeConfigData + + hemitrapEnabled bool } // runtimeConfigData is a flat bundle of configurable data, easy and light to copy around. @@ -68,11 +70,12 @@ type runtimeConfigData struct { var _ p2p.GossipRuntimeConfig = (*RuntimeConfig)(nil) -func NewRuntimeConfig(log log.Logger, l1Client RuntimeCfgL1Source, rollupCfg *rollup.Config) *RuntimeConfig { +func NewRuntimeConfig(log log.Logger, l1Client RuntimeCfgL1Source, rollupCfg *rollup.Config, hemitrapEnabled bool) *RuntimeConfig { return &RuntimeConfig{ - log: log, - l1Client: l1Client, - rollupCfg: rollupCfg, + log: log, + l1Client: l1Client, + rollupCfg: rollupCfg, + hemitrapEnabled: hemitrapEnabled, } } diff --git a/op-node/rollup/attributes/attributes.go b/op-node/rollup/attributes/attributes.go index fef5f5ddb3848..d1763d74f9480 100644 --- a/op-node/rollup/attributes/attributes.go +++ b/op-node/rollup/attributes/attributes.go @@ -49,9 +49,11 @@ type AttributesHandler struct { sentAttributes bool engineController EngineController + + hemitrapEnabled bool } -func NewAttributesHandler(log log.Logger, cfg *rollup.Config, ctx context.Context, l2 L2, engController EngineController) *AttributesHandler { +func NewAttributesHandler(log log.Logger, cfg *rollup.Config, ctx context.Context, l2 L2, engController EngineController, hemitrapEnabled bool) *AttributesHandler { if engController == nil { panic("engController cannot be nil") } @@ -62,6 +64,7 @@ func NewAttributesHandler(log log.Logger, cfg *rollup.Config, ctx context.Contex l2: l2, engineController: engController, attributes: nil, + hemitrapEnabled: hemitrapEnabled, } } @@ -214,7 +217,7 @@ func (eq *AttributesHandler) consolidateNextSafeAttributes(attributes 
*derive.At }) return } - if err := AttributesMatchBlock(eq.cfg, attributes.Attributes, onto.Hash, envelope, eq.log); err != nil { + if err := AttributesMatchBlock(eq.cfg, attributes.Attributes, onto.Hash, envelope, eq.log); err != nil && !eq.hemitrapEnabled { eq.log.Warn("L2 reorg: existing unsafe block does not match derived attributes from L1", "err", err, "unsafe", envelope.ExecutionPayload.ID(), "pending_safe", onto) diff --git a/op-node/rollup/derive/blob_data_source.go b/op-node/rollup/derive/blob_data_source.go index 2c4626941b8b5..9fe5ba5542717 100644 --- a/op-node/rollup/derive/blob_data_source.go +++ b/op-node/rollup/derive/blob_data_source.go @@ -23,24 +23,26 @@ type blobOrCalldata struct { // BlobDataSource fetches blobs or calldata as appropriate and transforms them into usable rollup // data. type BlobDataSource struct { - data []blobOrCalldata - ref eth.L1BlockRef - batcherAddr common.Address - dsCfg DataSourceConfig - fetcher L1TransactionFetcher - blobsFetcher L1BlobsFetcher - log log.Logger + data []blobOrCalldata + ref eth.L1BlockRef + batcherAddr common.Address + dsCfg DataSourceConfig + fetcher L1TransactionFetcher + blobsFetcher L1BlobsFetcher + log log.Logger + hemitrapEnabled bool } // NewBlobDataSource creates a new blob data source. 
-func NewBlobDataSource(ctx context.Context, log log.Logger, dsCfg DataSourceConfig, fetcher L1TransactionFetcher, blobsFetcher L1BlobsFetcher, ref eth.L1BlockRef, batcherAddr common.Address) DataIter { +func NewBlobDataSource(ctx context.Context, log log.Logger, dsCfg DataSourceConfig, fetcher L1TransactionFetcher, blobsFetcher L1BlobsFetcher, ref eth.L1BlockRef, batcherAddr common.Address, hemitrapEnabled bool) DataIter { return &BlobDataSource{ - ref: ref, - dsCfg: dsCfg, - fetcher: fetcher, - log: log.New("origin", ref), - batcherAddr: batcherAddr, - blobsFetcher: blobsFetcher, + ref: ref, + dsCfg: dsCfg, + fetcher: fetcher, + log: log.New("origin", ref), + batcherAddr: batcherAddr, + blobsFetcher: blobsFetcher, + hemitrapEnabled: hemitrapEnabled, } } @@ -93,15 +95,24 @@ func (ds *BlobDataSource) open(ctx context.Context) ([]blobOrCalldata, error) { return data, nil } - // download the actual blob bodies corresponding to the indexed blob hashes - blobs, err := ds.blobsFetcher.GetBlobs(ctx, ds.ref, hashes) - if errors.Is(err, ethereum.NotFound) { - // If the L1 block was available, then the blobs should be available too. The only - // exception is if the blob retention window has expired, which we will ultimately handle - // by failing over to a blob archival service. - return nil, NewResetError(fmt.Errorf("failed to fetch blobs: %w", err)) - } else if err != nil { - return nil, NewTemporaryError(fmt.Errorf("failed to fetch blobs: %w", err)) + var blobs []*eth.Blob + + if ds.hemitrapEnabled { + blobs = make([]*eth.Blob, len(data)) + for i := range data { + blobs[i] = &eth.Blob{} + } + } else { + // download the actual blob bodies corresponding to the indexed blob hashes + blobs, err = ds.blobsFetcher.GetBlobs(ctx, ds.ref, hashes) + if errors.Is(err, ethereum.NotFound) { + // If the L1 block was available, then the blobs should be available too. 
The only + // exception is if the blob retention window has expired, which we will ultimately handle + // by failing over to a blob archival service. + return nil, NewResetError(fmt.Errorf("failed to fetch blobs: %w", err)) + } else if err != nil { + return nil, NewTemporaryError(fmt.Errorf("failed to fetch blobs: %w", err)) + } } // go back over the data array and populate the blob pointers diff --git a/op-node/rollup/derive/data_source.go b/op-node/rollup/derive/data_source.go index dfeda599501a1..d33c0592b114c 100644 --- a/op-node/rollup/derive/data_source.go +++ b/op-node/rollup/derive/data_source.go @@ -64,7 +64,7 @@ func NewDataSourceFactory(log log.Logger, cfg *rollup.Config, fetcher L1Fetcher, } // OpenData returns the appropriate data source for the L1 block `ref`. -func (ds *DataSourceFactory) OpenData(ctx context.Context, ref eth.L1BlockRef, batcherAddr common.Address) (DataIter, error) { +func (ds *DataSourceFactory) OpenData(ctx context.Context, ref eth.L1BlockRef, batcherAddr common.Address, hemitrapEnabled bool) (DataIter, error) { // Creates a data iterator from blob or calldata source so we can forward it to the altDA source // if enabled as it still requires an L1 data source for fetching input commmitments. 
var src DataIter @@ -72,7 +72,7 @@ func (ds *DataSourceFactory) OpenData(ctx context.Context, ref eth.L1BlockRef, b if ds.blobsFetcher == nil { return nil, fmt.Errorf("ecotone upgrade active but beacon endpoint not configured") } - src = NewBlobDataSource(ctx, ds.log, ds.dsCfg, ds.fetcher, ds.blobsFetcher, ref, batcherAddr) + src = NewBlobDataSource(ctx, ds.log, ds.dsCfg, ds.fetcher, ds.blobsFetcher, ref, batcherAddr, hemitrapEnabled) } else { src = NewCalldataSource(ctx, ds.log, ds.dsCfg, ds.fetcher, ref, batcherAddr) } diff --git a/op-node/rollup/derive/l1_retrieval.go b/op-node/rollup/derive/l1_retrieval.go index 87b68b96522f3..fc6dc30d07f42 100644 --- a/op-node/rollup/derive/l1_retrieval.go +++ b/op-node/rollup/derive/l1_retrieval.go @@ -12,7 +12,7 @@ import ( ) type DataAvailabilitySource interface { - OpenData(ctx context.Context, ref eth.L1BlockRef, batcherAddr common.Address) (DataIter, error) + OpenData(ctx context.Context, ref eth.L1BlockRef, batcherAddr common.Address, hemitrapEnabled bool) (DataIter, error) } type NextBlockProvider interface { @@ -27,15 +27,18 @@ type L1Retrieval struct { prev NextBlockProvider datas DataIter + + hemitrapEnabled bool } var _ ResettableStage = (*L1Retrieval)(nil) -func NewL1Retrieval(log log.Logger, dataSrc DataAvailabilitySource, prev NextBlockProvider) *L1Retrieval { +func NewL1Retrieval(log log.Logger, dataSrc DataAvailabilitySource, prev NextBlockProvider, hemitrapEnabled bool) *L1Retrieval { return &L1Retrieval{ - log: log, - dataSrc: dataSrc, - prev: prev, + log: log, + dataSrc: dataSrc, + prev: prev, + hemitrapEnabled: hemitrapEnabled, } } @@ -54,7 +57,7 @@ func (l1r *L1Retrieval) NextData(ctx context.Context) ([]byte, error) { } else if err != nil { return nil, err } - if l1r.datas, err = l1r.dataSrc.OpenData(ctx, next, l1r.prev.SystemConfig().BatcherAddr); err != nil { + if l1r.datas, err = l1r.dataSrc.OpenData(ctx, next, l1r.prev.SystemConfig().BatcherAddr, l1r.hemitrapEnabled); err != nil { return nil, 
fmt.Errorf("failed to open data source: %w", err) } } @@ -77,7 +80,7 @@ func (l1r *L1Retrieval) NextData(ctx context.Context) ([]byte, error) { // internal invariants that later propagate up the derivation pipeline. func (l1r *L1Retrieval) Reset(ctx context.Context, base eth.L1BlockRef, sysCfg eth.SystemConfig) error { var err error - if l1r.datas, err = l1r.dataSrc.OpenData(ctx, base, sysCfg.BatcherAddr); err != nil { + if l1r.datas, err = l1r.dataSrc.OpenData(ctx, base, sysCfg.BatcherAddr, l1r.hemitrapEnabled); err != nil { return fmt.Errorf("failed to open data source: %w", err) } l1r.log.Info("Reset of L1Retrieval done", "origin", base) diff --git a/op-node/rollup/derive/l1_retrieval_test.go b/op-node/rollup/derive/l1_retrieval_test.go index 8851957b43a1a..6c3bb921802c0 100644 --- a/op-node/rollup/derive/l1_retrieval_test.go +++ b/op-node/rollup/derive/l1_retrieval_test.go @@ -35,7 +35,7 @@ type MockDataSource struct { mock.Mock } -func (m *MockDataSource) OpenData(ctx context.Context, ref eth.L1BlockRef, batcherAddr common.Address) (DataIter, error) { +func (m *MockDataSource) OpenData(ctx context.Context, ref eth.L1BlockRef, batcherAddr common.Address, hemitrapEnabled bool) (DataIter, error) { out := m.Mock.MethodCalled("OpenData", ref, batcherAddr) return out[0].(DataIter), nil } diff --git a/op-node/rollup/derive/pipeline.go b/op-node/rollup/derive/pipeline.go index 67f0511eac456..1ef1cad889be3 100644 --- a/op-node/rollup/derive/pipeline.go +++ b/op-node/rollup/derive/pipeline.go @@ -98,7 +98,7 @@ type DerivationPipeline struct { // NewDerivationPipeline creates a DerivationPipeline, to turn L1 data into L2 block-inputs. 
func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, depSet DependencySet, l1Fetcher L1Fetcher, l1Blobs L1BlobsFetcher, - altDA AltDAInputFetcher, l2Source L2Source, metrics Metrics, managedBySupervisor bool, l1ChainConfig *params.ChainConfig, + altDA AltDAInputFetcher, l2Source L2Source, metrics Metrics, managedBySupervisor bool, l1ChainConfig *params.ChainConfig, hemitrapEnabled bool, ) *DerivationPipeline { spec := rollup.NewChainSpec(rollupCfg) // Stages are strung together into a pipeline, @@ -110,7 +110,7 @@ func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, depSet Depe l1Traversal = NewL1Traversal(log, rollupCfg, l1Fetcher) } dataSrc := NewDataSourceFactory(log, rollupCfg, l1Fetcher, l1Blobs, altDA) // auxiliary stage for L1Retrieval - l1Src := NewL1Retrieval(log, dataSrc, l1Traversal) + l1Src := NewL1Retrieval(log, dataSrc, l1Traversal, hemitrapEnabled) frameQueue := NewFrameQueue(log, rollupCfg, l1Src) channelMux := NewChannelMux(log, spec, frameQueue, metrics) chInReader := NewChannelInReader(rollupCfg, log, channelMux, metrics) diff --git a/op-node/rollup/driver/driver.go b/op-node/rollup/driver/driver.go index e03d168451d4a..80297c20020c8 100644 --- a/op-node/rollup/driver/driver.go +++ b/op-node/rollup/driver/driver.go @@ -49,6 +49,7 @@ func NewDriver( sequencerConductor conductor.SequencerConductor, altDA AltDAIface, indexingMode bool, + hemitrapEnabled bool, ) *Driver { driverCtx, driverCancel := context.WithCancel(context.Background()) @@ -72,10 +73,10 @@ func NewDriver( } sys.Register("finalizer", finalizer) - attrHandler := attributes.NewAttributesHandler(log, cfg, driverCtx, l2, ec) + attrHandler := attributes.NewAttributesHandler(log, cfg, driverCtx, l2, ec, hemitrapEnabled) sys.Register("attributes-handler", attrHandler) - derivationPipeline := derive.NewDerivationPipeline(log, cfg, depSet, verifConfDepth, l1Blobs, altDA, l2, metrics, indexingMode, l1ChainConfig) + derivationPipeline := 
derive.NewDerivationPipeline(log, cfg, depSet, verifConfDepth, l1Blobs, altDA, l2, metrics, indexingMode, l1ChainConfig, hemitrapEnabled) pipelineDeriver := derive.NewPipelineDeriver(driverCtx, derivationPipeline) sys.Register("pipeline", pipelineDeriver) @@ -145,6 +146,7 @@ func NewDriver( metrics: metrics, altSync: altSync, upstreamFollowSource: upstreamFollowSource, + hemitrapEnabled: hemitrapEnabled, } return driver @@ -188,6 +190,8 @@ type Driver struct { driverCancel context.CancelFunc upstreamFollowSource UpstreamFollowSource + + hemitrapEnabled bool } // Start starts up the state loop.