From 83b25d6a5c58f8b8c05009871bc09d94edbbd4a0 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 30 Jan 2026 13:30:34 +0100 Subject: [PATCH 01/21] refactor(store): replace go-header store by ev-node store --- apps/evm/cmd/rollback.go | 52 +-- apps/testapp/cmd/rollback.go | 47 +-- node/failover.go | 11 +- node/full.go | 4 +- node/light.go | 2 +- pkg/store/README.md | 94 +++++ pkg/store/data_store_adapter.go | 345 ++++++++++++++++ pkg/store/data_store_adapter_test.go | 552 +++++++++++++++++++++++++ pkg/store/header_store_adapter.go | 345 ++++++++++++++++ pkg/store/header_store_adapter_test.go | 474 +++++++++++++++++++++ pkg/sync/sync_service.go | 73 ++-- pkg/sync/sync_service_test.go | 6 +- 12 files changed, 1872 insertions(+), 133 deletions(-) create mode 100644 pkg/store/data_store_adapter.go create mode 100644 pkg/store/data_store_adapter_test.go create mode 100644 pkg/store/header_store_adapter.go create mode 100644 pkg/store/header_store_adapter_test.go diff --git a/apps/evm/cmd/rollback.go b/apps/evm/cmd/rollback.go index 8fefb4f0ec..65ceb768cf 100644 --- a/apps/evm/cmd/rollback.go +++ b/apps/evm/cmd/rollback.go @@ -11,11 +11,9 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/spf13/cobra" - goheaderstore "github.com/celestiaorg/go-header/store" "github.com/evstack/ev-node/execution/evm" rollcmd "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/store" - "github.com/evstack/ev-node/types" ) // NewRollbackCmd creates a command to rollback ev-node state by one height. @@ -63,9 +61,14 @@ func NewRollbackCmd() *cobra.Command { height = currentHeight - 1 } + var errs error + // rollback ev-node main state + // Note: With the unified store approach, the ev-node store is the single source of truth. + // The store adapters (HeaderStoreAdapter/DataStoreAdapter) read from this store, + // so rolling back the ev-node store automatically affects P2P sync operations. 
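+			// Rollback errors are collected with errors.Join so the execution-layer
+			// rollback below is still attempted even if the ev-node store rollback fails.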
if err := evolveStore.Rollback(goCtx, height, !syncNode); err != nil { - return fmt.Errorf("failed to rollback ev-node state: %w", err) + errs = errors.Join(errs, fmt.Errorf("failed to rollback ev-node state: %w", err)) } // rollback execution layer via EngineClient @@ -74,47 +77,10 @@ func NewRollbackCmd() *cobra.Command { cmd.Printf("Warning: failed to create engine client, skipping EL rollback: %v\n", err) } else { if err := engineClient.Rollback(goCtx, height); err != nil { - return fmt.Errorf("failed to rollback execution layer: %w", err) + errs = errors.Join(errs, fmt.Errorf("failed to rollback execution layer: %w", err)) + } else { + cmd.Printf("Rolled back execution layer to height %d\n", height) } - cmd.Printf("Rolled back execution layer to height %d\n", height) - } - - // rollback ev-node goheader state - headerStore, err := goheaderstore.NewStore[*types.SignedHeader]( - evolveDB, - goheaderstore.WithStorePrefix("headerSync"), - goheaderstore.WithMetrics(), - ) - if err != nil { - return err - } - - dataStore, err := goheaderstore.NewStore[*types.Data]( - evolveDB, - goheaderstore.WithStorePrefix("dataSync"), - goheaderstore.WithMetrics(), - ) - if err != nil { - return err - } - - if err := headerStore.Start(goCtx); err != nil { - return fmt.Errorf("failed to start header store: %w", err) - } - defer headerStore.Stop(goCtx) - - if err := dataStore.Start(goCtx); err != nil { - return fmt.Errorf("failed to start data store: %w", err) - } - defer dataStore.Stop(goCtx) - - var errs error - if err := headerStore.DeleteRange(goCtx, height+1, headerStore.Height()); err != nil { - errs = errors.Join(errs, fmt.Errorf("failed to rollback header sync service state: %w", err)) - } - - if err := dataStore.DeleteRange(goCtx, height+1, dataStore.Height()); err != nil { - errs = errors.Join(errs, fmt.Errorf("failed to rollback data sync service state: %w", err)) } cmd.Printf("Rolled back ev-node state to height %d\n", height) diff --git a/apps/testapp/cmd/rollback.go b/apps/testapp/cmd/rollback.go index dfea32176f..2af9bb04df 100644 --- a/apps/testapp/cmd/rollback.go +++ b/apps/testapp/cmd/rollback.go @@ -8,9 +8,7 @@ import ( kvexecutor "github.com/evstack/ev-node/apps/testapp/kv" rollcmd "github.com/evstack/ev-node/pkg/cmd" "github.com/evstack/ev-node/pkg/store" - "github.com/evstack/ev-node/types" - goheaderstore "github.com/celestiaorg/go-header/store" "github.com/spf13/cobra" ) @@ -64,47 +62,14 @@ func NewRollbackCmd() *cobra.Command { return err } - // rollback ev-node main state - if err := evolveStore.Rollback(goCtx, height, !syncNode); err != nil { - return fmt.Errorf("failed to rollback ev-node state: %w", err) - } - - // rollback ev-node goheader state - headerStore, err := goheaderstore.NewStore[*types.SignedHeader]( - evolveDB, - goheaderstore.WithStorePrefix("headerSync"), - goheaderstore.WithMetrics(), - ) - if err != nil { - return err - } - - dataStore, err := goheaderstore.NewStore[*types.Data]( - evolveDB, - goheaderstore.WithStorePrefix("dataSync"), - goheaderstore.WithMetrics(), - ) - if err != nil { - return err - } - - if err := headerStore.Start(goCtx); err != nil { - return err - } - defer headerStore.Stop(goCtx) - - if err := dataStore.Start(goCtx); err != nil { - return err - } - defer dataStore.Stop(goCtx) - var errs error - if err := headerStore.DeleteRange(goCtx, height+1, headerStore.Height()); err != nil { - errs = errors.Join(errs, fmt.Errorf("failed to rollback header sync service state: %w", err)) - } - if err := dataStore.DeleteRange(goCtx, height+1, 
dataStore.Height()); err != nil { - errs = errors.Join(errs, fmt.Errorf("failed to rollback data sync service state: %w", err)) + // rollback ev-node main state + // Note: With the unified store approach, the ev-node store is the single source of truth. + // The store adapters (HeaderStoreAdapter/DataStoreAdapter) read from this store, + // so rolling back the ev-node store automatically affects P2P sync operations. + if err := evolveStore.Rollback(goCtx, height, !syncNode); err != nil { + errs = errors.Join(errs, fmt.Errorf("failed to rollback ev-node state: %w", err)) } // rollback execution store diff --git a/node/failover.go b/node/failover.go index 27f026843d..27f4ddf685 100644 --- a/node/failover.go +++ b/node/failover.go @@ -46,7 +46,6 @@ func newSyncMode( da block.DAClient, logger zerolog.Logger, rktStore store.Store, - mainKV ds.Batching, blockMetrics *block.Metrics, nodeOpts NodeOptions, raftNode *raft.Node, @@ -66,7 +65,7 @@ func newSyncMode( raftNode, ) } - return setupFailoverState(nodeConfig, nodeKey, rootDB, daStore, genesis, logger, mainKV, rktStore, blockComponentsFn, raftNode) + return setupFailoverState(nodeConfig, nodeKey, rootDB, daStore, genesis, logger, rktStore, blockComponentsFn, raftNode) } func newAggregatorMode( @@ -81,7 +80,6 @@ func newAggregatorMode( da block.DAClient, logger zerolog.Logger, rktStore store.Store, - mainKV ds.Batching, blockMetrics *block.Metrics, nodeOpts NodeOptions, raftNode *raft.Node, @@ -105,7 +103,7 @@ func newAggregatorMode( ) } - return setupFailoverState(nodeConfig, nodeKey, rootDB, daStore, genesis, logger, mainKV, rktStore, blockComponentsFn, raftNode) + return setupFailoverState(nodeConfig, nodeKey, rootDB, daStore, genesis, logger, rktStore, blockComponentsFn, raftNode) } func setupFailoverState( @@ -115,7 +113,6 @@ func setupFailoverState( daStore store.Store, genesis genesispkg.Genesis, logger zerolog.Logger, - mainKV ds.Batching, rktStore store.Store, buildComponentsFn func(headerSyncService *evsync.HeaderSyncService, dataSyncService *evsync.DataSyncService) (*block.Components, error), raftNode *raft.Node, @@ -125,12 +122,12 @@ func setupFailoverState( return nil, err } - headerSyncService, err := evsync.NewHeaderSyncService(mainKV, daStore, nodeConfig, genesis, p2pClient, logger.With().Str("component", "HeaderSyncService").Logger()) + headerSyncService, err := evsync.NewHeaderSyncService(daStore, nodeConfig, genesis, p2pClient, logger.With().Str("component", "HeaderSyncService").Logger()) if err != nil { return nil, fmt.Errorf("error while initializing HeaderSyncService: %w", err) } - dataSyncService, err := evsync.NewDataSyncService(mainKV, daStore, nodeConfig, genesis, p2pClient, logger.With().Str("component", "DataSyncService").Logger()) + dataSyncService, err := evsync.NewDataSyncService(daStore, nodeConfig, genesis, p2pClient, logger.With().Str("component", "DataSyncService").Logger()) if err != nil { return nil, fmt.Errorf("error while initializing DataSyncService: %w", err) } diff --git a/node/full.go b/node/full.go index 6becc15c90..4fa2ff7c52 100644 --- a/node/full.go +++ b/node/full.go @@ -107,12 +107,12 @@ func newFullNode( logger.Info().Msg("Starting aggregator-MODE") nodeConfig.Node.Aggregator = true nodeConfig.P2P.Peers = "" // peers are not supported in aggregator mode - return newAggregatorMode(nodeConfig, nodeKey, signer, genesis, database, evstore, exec, sequencer, daClient, logger, evstore, mainKV, blockMetrics, nodeOpts, raftNode) + return newAggregatorMode(nodeConfig, nodeKey, signer, genesis, database, 
evstore, exec, sequencer, daClient, logger, evstore, blockMetrics, nodeOpts, raftNode) } followerFactory := func() (raftpkg.Runnable, error) { logger.Info().Msg("Starting sync-MODE") nodeConfig.Node.Aggregator = false - return newSyncMode(nodeConfig, nodeKey, genesis, database, evstore, exec, daClient, logger, evstore, mainKV, blockMetrics, nodeOpts, raftNode) + return newSyncMode(nodeConfig, nodeKey, genesis, database, evstore, exec, daClient, logger, evstore, blockMetrics, nodeOpts, raftNode) } // Initialize raft node if enabled (for both aggregator and sync nodes) diff --git a/node/light.go b/node/light.go index 5c9ec183e2..8790507a07 100644 --- a/node/light.go +++ b/node/light.go @@ -57,7 +57,7 @@ func newLightNode( return nil, fmt.Errorf("failed to create cached store: %w", err) } - headerSyncService, err := sync.NewHeaderSyncService(database, cachedStore, conf, genesis, p2pClient, componentLogger) + headerSyncService, err := sync.NewHeaderSyncService(cachedStore, conf, genesis, p2pClient, componentLogger) if err != nil { return nil, fmt.Errorf("error while initializing HeaderSyncService: %w", err) } diff --git a/pkg/store/README.md b/pkg/store/README.md index ca052a6f10..b5f58b36d0 100644 --- a/pkg/store/README.md +++ b/pkg/store/README.md @@ -176,3 +176,97 @@ batch.Delete(ctx, key3) // Commit all operations atomically err = batch.Commit(ctx) ``` + +## Store Adapters for P2P Integration + +The store package provides adapter implementations that wrap the ev-node store to satisfy the `header.Store[H]` interface from the `go-header` library. This enables the ev-node store to be used directly by go-header's P2P infrastructure, eliminating data duplication. + +### Background + +Previously, ev-node maintained redundant storage: + +1. **ev-node store** - Primary store for headers, data, state, and metadata +2. **go-header stores** - Separate stores for P2P sync (headerSync and dataSync prefixes) + +This resulted in the same headers and data being stored multiple times. + +### Solution: Store Adapters + +Two adapter types wrap the ev-node store: + +- **`HeaderStoreAdapter`** - Implements `header.Store[*types.SignedHeader]` +- **`DataStoreAdapter`** - Implements `header.Store[*types.Data]` + +These adapters: + +- Read from the single ev-node store +- Write to the ev-node store when receiving data via P2P +- Provide the interface required by go-header's exchange server, syncer, and subscriber + +### Usage + +```go +// Create the ev-node store +evStore := store.New(kvStore) + +// Create adapters for P2P infrastructure +headerAdapter := store.NewHeaderStoreAdapter(evStore) +dataAdapter := store.NewDataStoreAdapter(evStore) + +// Use with go-header P2P components +exchangeServer, _ := goheaderp2p.NewExchangeServer(host, headerAdapter, opts...) +``` + +### Benefits + +1. **Single source of truth** - All block data lives in one place +2. **Reduced disk usage** - No duplicate storage of headers and data +3. **Simplified rollback** - Rolling back the ev-node store automatically affects P2P sync +4. **Atomic operations** - Block storage and state updates remain atomic + +### Architecture + +```mermaid +classDiagram + class Store { + <> + +GetHeader(height) SignedHeader + +GetBlockData(height) (SignedHeader, Data) + +GetBlockByHash(hash) (SignedHeader, Data) + +NewBatch() Batch + } + + class HeaderStoreAdapter { + -store Store + -height atomic.Uint64 + +Head() SignedHeader + +GetByHeight(height) SignedHeader + +Get(hash) SignedHeader + +Append(headers...) 
error + +Height() uint64 + } + + class DataStoreAdapter { + -store Store + -height atomic.Uint64 + +Head() Data + +GetByHeight(height) Data + +Get(hash) Data + +Append(data...) error + +Height() uint64 + } + + class GoHeaderStore { + <> + +Head() H + +Get(hash) H + +GetByHeight(height) H + +Append(items...) error + +Height() uint64 + } + + Store <-- HeaderStoreAdapter : wraps + Store <-- DataStoreAdapter : wraps + GoHeaderStore <|.. HeaderStoreAdapter : implements + GoHeaderStore <|.. DataStoreAdapter : implements +``` diff --git a/pkg/store/data_store_adapter.go b/pkg/store/data_store_adapter.go new file mode 100644 index 0000000000..6f54d17d53 --- /dev/null +++ b/pkg/store/data_store_adapter.go @@ -0,0 +1,345 @@ +package store + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + + "github.com/celestiaorg/go-header" + + "github.com/evstack/ev-node/types" +) + +// DataStoreAdapter wraps Store to implement header.Store[*types.Data]. +// This allows the ev-node store to be used directly by go-header's P2P infrastructure, +// eliminating the need for a separate go-header store and reducing data duplication. +type DataStoreAdapter struct { + store Store + + // height caches the current height to avoid repeated context-based lookups. + // Updated on successful reads and writes. + height atomic.Uint64 + + // mu protects initialization state + mu sync.RWMutex + initialized bool + + // onDeleteFn is called when data is deleted (for rollback scenarios) + onDeleteFn func(context.Context, uint64) error +} + +// Compile-time check that DataStoreAdapter implements header.Store +var _ header.Store[*types.Data] = (*DataStoreAdapter)(nil) + +// NewDataStoreAdapter creates a new DataStoreAdapter wrapping the given store. +func NewDataStoreAdapter(store Store) *DataStoreAdapter { + adapter := &DataStoreAdapter{ + store: store, + } + + // Initialize height from store + if h, err := store.Height(context.Background()); err == nil && h > 0 { + adapter.height.Store(h) + adapter.initialized = true + } + + return adapter +} + +// Start implements header.Store. It initializes the adapter if needed. +func (a *DataStoreAdapter) Start(ctx context.Context) error { + a.mu.Lock() + defer a.mu.Unlock() + + // Refresh height from store + h, err := a.store.Height(ctx) + if err != nil { + return err + } + + if h > 0 { + a.height.Store(h) + a.initialized = true + } + + return nil +} + +// Stop implements header.Store. No-op since the underlying store lifecycle +// is managed separately. +func (a *DataStoreAdapter) Stop(ctx context.Context) error { + return nil +} + +// Head returns the data for the highest block in the store. +func (a *DataStoreAdapter) Head(ctx context.Context, _ ...header.HeadOption[*types.Data]) (*types.Data, error) { + height := a.height.Load() + if height == 0 { + // Try to refresh from store + h, err := a.store.Height(ctx) + if err != nil { + return nil, header.ErrNotFound + } + if h == 0 { + return nil, header.ErrNotFound + } + a.height.Store(h) + height = h + } + + _, data, err := a.store.GetBlockData(ctx, height) + if err != nil { + return nil, header.ErrNotFound + } + + return data, nil +} + +// Tail returns the data for the lowest block in the store. +// For ev-node, this is typically the genesis/initial height. 
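+// If earlier heights are unavailable, it scans forward from height 1 until data is found.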
+func (a *DataStoreAdapter) Tail(ctx context.Context) (*types.Data, error) { + height := a.height.Load() + if height == 0 { + return nil, header.ErrNotFound + } + + // Try height 1 first (most common case) + _, data, err := a.store.GetBlockData(ctx, 1) + if err == nil { + return data, nil + } + + // Linear scan from 1 to current height to find first data + for h := uint64(2); h <= height; h++ { + _, data, err = a.store.GetBlockData(ctx, h) + if err == nil { + return data, nil + } + } + + return nil, header.ErrNotFound +} + +// Get returns data by its hash. +func (a *DataStoreAdapter) Get(ctx context.Context, hash header.Hash) (*types.Data, error) { + _, data, err := a.store.GetBlockByHash(ctx, hash) + if err != nil { + return nil, header.ErrNotFound + } + return data, nil +} + +// GetByHeight returns data at the given height. +func (a *DataStoreAdapter) GetByHeight(ctx context.Context, height uint64) (*types.Data, error) { + _, data, err := a.store.GetBlockData(ctx, height) + if err != nil { + return nil, header.ErrNotFound + } + return data, nil +} + +// GetRangeByHeight returns data in the range [from.Height()+1, to). +// This follows go-header's convention where 'from' is the trusted data +// and we return data starting from the next height. +func (a *DataStoreAdapter) GetRangeByHeight(ctx context.Context, from *types.Data, to uint64) ([]*types.Data, error) { + if from == nil { + return nil, errors.New("from data cannot be nil") + } + + startHeight := from.Height() + 1 + if startHeight >= to { + return nil, nil + } + + return a.GetRange(ctx, startHeight, to) +} + +// GetRange returns data in the range [from, to). +func (a *DataStoreAdapter) GetRange(ctx context.Context, from, to uint64) ([]*types.Data, error) { + if from >= to { + return nil, nil + } + + dataList := make([]*types.Data, 0, to-from) + for height := from; height < to; height++ { + _, data, err := a.store.GetBlockData(ctx, height) + if err != nil { + // Return what we have so far + if len(dataList) > 0 { + return dataList, nil + } + return nil, header.ErrNotFound + } + dataList = append(dataList, data) + } + + return dataList, nil +} + +// Has checks if data with the given hash exists. +func (a *DataStoreAdapter) Has(ctx context.Context, hash header.Hash) (bool, error) { + _, _, err := a.store.GetBlockByHash(ctx, hash) + if err != nil { + return false, nil + } + return true, nil +} + +// HasAt checks if data exists at the given height. +func (a *DataStoreAdapter) HasAt(ctx context.Context, height uint64) bool { + _, _, err := a.store.GetBlockData(ctx, height) + return err == nil +} + +// Height returns the current height of the store. +func (a *DataStoreAdapter) Height() uint64 { + height := a.height.Load() + if height == 0 { + // Try to refresh from store + if h, err := a.store.Height(context.Background()); err == nil { + a.height.Store(h) + return h + } + } + return height +} + +// Append stores data in the store. +// This method is called by go-header's P2P infrastructure when data is received. +// We save the data to the ev-node store to ensure it's available for the syncer. +func (a *DataStoreAdapter) Append(ctx context.Context, dataList ...*types.Data) error { + if len(dataList) == 0 { + return nil + } + + for _, data := range dataList { + if data == nil || data.IsZero() { + continue + } + + // Check if we already have this data + if a.HasAt(ctx, data.Height()) { + continue + } + + // Create a batch and save the data + // Note: We create a minimal header since we only have the data at this point. 
+ // The full block will be saved by the syncer when processing from DA. + batch, err := a.store.NewBatch(ctx) + if err != nil { + return fmt.Errorf("failed to create batch for data at height %d: %w", data.Height(), err) + } + + // Create a minimal header for the data + // The syncer will overwrite this with complete block data when it processes from DA + minimalHeader := &types.SignedHeader{ + Header: types.Header{ + BaseHeader: types.BaseHeader{ + ChainID: data.ChainID(), + Height: data.Height(), + Time: uint64(data.Time().UnixNano()), + }, + LastHeaderHash: data.LastHeader(), + DataHash: data.DACommitment(), + }, + } + + if err := batch.SaveBlockData(minimalHeader, data, &types.Signature{}); err != nil { + return fmt.Errorf("failed to save data at height %d: %w", data.Height(), err) + } + + if err := batch.SetHeight(data.Height()); err != nil { + return fmt.Errorf("failed to set height for data at height %d: %w", data.Height(), err) + } + + if err := batch.Commit(); err != nil { + return fmt.Errorf("failed to commit data at height %d: %w", data.Height(), err) + } + + // Update cached height + if data.Height() > a.height.Load() { + a.height.Store(data.Height()) + } + } + + return nil +} + +// Init initializes the store with the first data. +// This is called by go-header when bootstrapping the store with trusted data. +func (a *DataStoreAdapter) Init(ctx context.Context, d *types.Data) error { + a.mu.Lock() + defer a.mu.Unlock() + + if a.initialized { + return nil + } + + if d == nil || d.IsZero() { + return nil + } + + // Use Append to save the data + a.mu.Unlock() // Unlock before calling Append to avoid deadlock + err := a.Append(ctx, d) + a.mu.Lock() // Re-lock for the initialized flag update + + if err != nil { + return err + } + + a.initialized = true + return nil +} + +// Sync ensures all pending writes are flushed. +// Delegates to the underlying store's sync if available. +func (a *DataStoreAdapter) Sync(ctx context.Context) error { + // The underlying store handles its own syncing + return nil +} + +// DeleteRange deletes data in the range [from, to). +// This is used for rollback operations. +func (a *DataStoreAdapter) DeleteRange(ctx context.Context, from, to uint64) error { + // Rollback is handled by the ev-node store's Rollback method + // This is called during store cleanup operations + if a.onDeleteFn != nil { + for height := from; height < to; height++ { + if err := a.onDeleteFn(ctx, height); err != nil { + return err + } + } + } + + // Update cached height if necessary + if from <= a.height.Load() { + a.height.Store(from - 1) + } + + return nil +} + +// OnDelete registers a callback to be invoked when data is deleted. +func (a *DataStoreAdapter) OnDelete(fn func(context.Context, uint64) error) { + a.onDeleteFn = fn +} + +// RefreshHeight updates the cached height from the underlying store. +// This should be called after the syncer processes a new block. +func (a *DataStoreAdapter) RefreshHeight(ctx context.Context) error { + h, err := a.store.Height(ctx) + if err != nil { + return err + } + a.height.Store(h) + return nil +} + +// SetHeight updates the cached height. +// This is useful when the syncer knows the new height after processing a block. 
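+// Unlike RefreshHeight, it only updates the cached value and does not consult the underlying store.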
+func (a *DataStoreAdapter) SetHeight(height uint64) { + a.height.Store(height) +} diff --git a/pkg/store/data_store_adapter_test.go b/pkg/store/data_store_adapter_test.go new file mode 100644 index 0000000000..6d2fbb0c07 --- /dev/null +++ b/pkg/store/data_store_adapter_test.go @@ -0,0 +1,552 @@ +package store + +import ( + "context" + "crypto/sha256" + "testing" + "time" + + "github.com/celestiaorg/go-header" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/types" +) + +// computeDataIndexHash computes the hash used for indexing in the store. +// The store indexes by sha256(signedHeader.MarshalBinary()), so for data tests +// we need to use the header hash from the saved block. +func computeDataIndexHash(h *types.SignedHeader) []byte { + blob, _ := h.MarshalBinary() + hash := sha256.Sum256(blob) + return hash[:] +} + +func TestDataStoreAdapter_NewDataStoreAdapter(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + + adapter := NewDataStoreAdapter(store) + require.NotNil(t, adapter) + + // Initially, height should be 0 + assert.Equal(t, uint64(0), adapter.Height()) + + // Head should return ErrNotFound when empty + _, err = adapter.Head(ctx) + assert.ErrorIs(t, err, header.ErrNotFound) +} + +func TestDataStoreAdapter_AppendAndRetrieve(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + // Create test data + _, d1 := types.GetRandomBlock(1, 2, "test-chain") + _, d2 := types.GetRandomBlock(2, 2, "test-chain") + + // Append data + err = adapter.Append(ctx, d1, d2) + require.NoError(t, err) + + // Check height is updated + assert.Equal(t, uint64(2), adapter.Height()) + + // Retrieve by height + retrieved, err := adapter.GetByHeight(ctx, 1) + require.NoError(t, err) + assert.Equal(t, d1.Height(), retrieved.Height()) + + retrieved, err = adapter.GetByHeight(ctx, 2) + require.NoError(t, err) + assert.Equal(t, d2.Height(), retrieved.Height()) + + // Head should return the latest + head, err := adapter.Head(ctx) + require.NoError(t, err) + assert.Equal(t, uint64(2), head.Height()) +} + +func TestDataStoreAdapter_Get(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + + // First save via the underlying store to get proper header hash + h1, d1 := types.GetRandomBlock(1, 2, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &h1.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + // Create adapter after data is in store + adapter := NewDataStoreAdapter(store) + + // Get by hash - need to use the index hash (sha256 of marshaled SignedHeader) + hash := computeDataIndexHash(h1) + retrieved, err := adapter.Get(ctx, hash) + require.NoError(t, err) + assert.Equal(t, d1.Height(), retrieved.Height()) + + // Get non-existent hash + _, err = adapter.Get(ctx, []byte("nonexistent")) + assert.ErrorIs(t, err, header.ErrNotFound) +} + +func TestDataStoreAdapter_Has(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + + h1, d1 := types.GetRandomBlock(1, 2, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, 
err) + require.NoError(t, batch.SaveBlockData(h1, d1, &h1.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + // Create adapter after data is in store + adapter := NewDataStoreAdapter(store) + + // Has should return true for existing data - use index hash + has, err := adapter.Has(ctx, computeDataIndexHash(h1)) + require.NoError(t, err) + assert.True(t, has) + + // Has should return false for non-existent + has, err = adapter.Has(ctx, []byte("nonexistent")) + require.NoError(t, err) + assert.False(t, has) +} + +func TestDataStoreAdapter_HasAt(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + _, d1 := types.GetRandomBlock(1, 2, "test-chain") + require.NoError(t, adapter.Append(ctx, d1)) + + // HasAt should return true for existing height + assert.True(t, adapter.HasAt(ctx, 1)) + + // HasAt should return false for non-existent height + assert.False(t, adapter.HasAt(ctx, 2)) +} + +func TestDataStoreAdapter_GetRange(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + // Create and append multiple data blocks + _, d1 := types.GetRandomBlock(1, 1, "test-chain") + _, d2 := types.GetRandomBlock(2, 1, "test-chain") + _, d3 := types.GetRandomBlock(3, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, d1, d2, d3)) + + // GetRange [1, 3) should return data 1 and 2 + dataList, err := adapter.GetRange(ctx, 1, 3) + require.NoError(t, err) + require.Len(t, dataList, 2) + assert.Equal(t, uint64(1), dataList[0].Height()) + assert.Equal(t, uint64(2), dataList[1].Height()) + + // GetRange with from >= to should return nil + dataList, err = adapter.GetRange(ctx, 3, 3) + require.NoError(t, err) + assert.Nil(t, dataList) +} + +func TestDataStoreAdapter_GetRangeByHeight(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + _, d1 := types.GetRandomBlock(1, 1, "test-chain") + _, d2 := types.GetRandomBlock(2, 1, "test-chain") + _, d3 := types.GetRandomBlock(3, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, d1, d2, d3)) + + // GetRangeByHeight from d1 to 4 should return data 2 and 3 + dataList, err := adapter.GetRangeByHeight(ctx, d1, 4) + require.NoError(t, err) + require.Len(t, dataList, 2) + assert.Equal(t, uint64(2), dataList[0].Height()) + assert.Equal(t, uint64(3), dataList[1].Height()) +} + +func TestDataStoreAdapter_Init(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + _, d1 := types.GetRandomBlock(1, 1, "test-chain") + + // Init should save the data + err = adapter.Init(ctx, d1) + require.NoError(t, err) + + // Verify it's stored + retrieved, err := adapter.GetByHeight(ctx, 1) + require.NoError(t, err) + assert.Equal(t, d1.Height(), retrieved.Height()) + + // Init again should be a no-op (already initialized) + _, d2 := types.GetRandomBlock(2, 1, "test-chain") + err = adapter.Init(ctx, d2) + require.NoError(t, err) + + // Height 2 should not be stored since Init was already done + assert.False(t, adapter.HasAt(ctx, 2)) +} + +func TestDataStoreAdapter_Tail(t *testing.T) { + t.Parallel() + ctx := 
context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + // Tail on empty store should return ErrNotFound + _, err = adapter.Tail(ctx) + assert.ErrorIs(t, err, header.ErrNotFound) + + _, d1 := types.GetRandomBlock(1, 1, "test-chain") + _, d2 := types.GetRandomBlock(2, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, d1, d2)) + + // Tail should return the first data + tail, err := adapter.Tail(ctx) + require.NoError(t, err) + assert.Equal(t, uint64(1), tail.Height()) +} + +func TestDataStoreAdapter_StartStop(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + // Start should not error + err = adapter.Start(ctx) + require.NoError(t, err) + + // Stop should not error + err = adapter.Stop(ctx) + require.NoError(t, err) +} + +func TestDataStoreAdapter_DeleteRange(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + _, d1 := types.GetRandomBlock(1, 1, "test-chain") + _, d2 := types.GetRandomBlock(2, 1, "test-chain") + _, d3 := types.GetRandomBlock(3, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, d1, d2, d3)) + + assert.Equal(t, uint64(3), adapter.Height()) + + // DeleteRange should update cached height + err = adapter.DeleteRange(ctx, 2, 4) + require.NoError(t, err) + + // Cached height should be updated to 1 + assert.Equal(t, uint64(1), adapter.Height()) +} + +func TestDataStoreAdapter_OnDelete(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + _, d1 := types.GetRandomBlock(1, 1, "test-chain") + _, d2 := types.GetRandomBlock(2, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, d1, d2)) + + // Track deleted heights + var deletedHeights []uint64 + adapter.OnDelete(func(ctx context.Context, height uint64) error { + deletedHeights = append(deletedHeights, height) + return nil + }) + + err = adapter.DeleteRange(ctx, 1, 3) + require.NoError(t, err) + + assert.Equal(t, []uint64{1, 2}, deletedHeights) +} + +func TestDataStoreAdapter_RefreshHeight(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + // Save a block directly to the underlying store + h1, d1 := types.GetRandomBlock(1, 1, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &types.Signature{})) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + // Adapter height may be stale + // RefreshHeight should update it + err = adapter.RefreshHeight(ctx) + require.NoError(t, err) + assert.Equal(t, uint64(1), adapter.Height()) +} + +func TestDataStoreAdapter_SetHeight(t *testing.T) { + t.Parallel() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + adapter.SetHeight(42) + assert.Equal(t, uint64(42), adapter.Height()) +} + +func TestDataStoreAdapter_AppendSkipsExisting(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + 
adapter := NewDataStoreAdapter(store) + + _, d1 := types.GetRandomBlock(1, 2, "test-chain") + require.NoError(t, adapter.Append(ctx, d1)) + + // Append the same data again should not error (skips existing) + err = adapter.Append(ctx, d1) + require.NoError(t, err) + + // Height should still be 1 + assert.Equal(t, uint64(1), adapter.Height()) +} + +func TestDataStoreAdapter_AppendNilData(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + // Append with nil and empty should not error + err = adapter.Append(ctx) + require.NoError(t, err) + + var nilData *types.Data + err = adapter.Append(ctx, nilData) + require.NoError(t, err) + + assert.Equal(t, uint64(0), adapter.Height()) +} + +func TestDataStoreAdapter_Sync(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + // Sync should not error + err = adapter.Sync(ctx) + require.NoError(t, err) +} + +func TestDataStoreAdapter_HeightRefreshFromStore(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + + // Save data directly to store before creating adapter + h1, d1 := types.GetRandomBlock(1, 1, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &types.Signature{})) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + // Create adapter - it should pick up the height from store + adapter := NewDataStoreAdapter(store) + assert.Equal(t, uint64(1), adapter.Height()) +} + +func TestDataStoreAdapter_GetByHeightNotFound(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + _, err = adapter.GetByHeight(ctx, 999) + assert.ErrorIs(t, err, header.ErrNotFound) +} + +func TestDataStoreAdapter_InitWithNil(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + // Init with nil should not error but also not mark as initialized + err = adapter.Init(ctx, nil) + require.NoError(t, err) + + // Should still return ErrNotFound + _, err = adapter.Head(ctx) + assert.ErrorIs(t, err, header.ErrNotFound) +} + +func TestDataStoreAdapter_ContextTimeout(t *testing.T) { + t.Parallel() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + // Create a context that's already canceled + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + defer cancel() + time.Sleep(1 * time.Millisecond) // Ensure context is expired + + // Operations should still work with in-memory store + // but this tests the context is being passed through + _, d1 := types.GetRandomBlock(1, 1, "test-chain") + // Note: In-memory store doesn't actually check context, but this verifies + // the adapter passes the context through + _ = adapter.Append(ctx, d1) +} + +func TestDataStoreAdapter_GetRangePartial(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) 
+ + // Only append data for heights 1 and 2, not 3 + _, d1 := types.GetRandomBlock(1, 1, "test-chain") + _, d2 := types.GetRandomBlock(2, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, d1, d2)) + + // GetRange [1, 5) should return data 1 and 2 (partial result) + dataList, err := adapter.GetRange(ctx, 1, 5) + require.NoError(t, err) + require.Len(t, dataList, 2) + assert.Equal(t, uint64(1), dataList[0].Height()) + assert.Equal(t, uint64(2), dataList[1].Height()) +} + +func TestDataStoreAdapter_GetRangeEmpty(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + // GetRange on empty store should return ErrNotFound + _, err = adapter.GetRange(ctx, 1, 5) + assert.ErrorIs(t, err, header.ErrNotFound) +} + +func TestDataStoreAdapter_MultipleAppends(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + // Append data in multiple batches + _, d1 := types.GetRandomBlock(1, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, d1)) + assert.Equal(t, uint64(1), adapter.Height()) + + _, d2 := types.GetRandomBlock(2, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, d2)) + assert.Equal(t, uint64(2), adapter.Height()) + + _, d3 := types.GetRandomBlock(3, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, d3)) + assert.Equal(t, uint64(3), adapter.Height()) + + // Verify all data is retrievable + for h := uint64(1); h <= 3; h++ { + assert.True(t, adapter.HasAt(ctx, h)) + } +} diff --git a/pkg/store/header_store_adapter.go b/pkg/store/header_store_adapter.go new file mode 100644 index 0000000000..15c81a6bc4 --- /dev/null +++ b/pkg/store/header_store_adapter.go @@ -0,0 +1,345 @@ +package store + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + + "github.com/celestiaorg/go-header" + + "github.com/evstack/ev-node/types" +) + +// HeaderStoreAdapter wraps Store to implement header.Store[*types.SignedHeader]. +// This allows the ev-node store to be used directly by go-header's P2P infrastructure, +// eliminating the need for a separate go-header store and reducing data duplication. +type HeaderStoreAdapter struct { + store Store + + // height caches the current height to avoid repeated context-based lookups. + // Updated on successful reads and writes. + height atomic.Uint64 + + // mu protects initialization state + mu sync.RWMutex + initialized bool + + // onDeleteFn is called when headers are deleted (for rollback scenarios) + onDeleteFn func(context.Context, uint64) error +} + +// Compile-time check that HeaderStoreAdapter implements header.Store +var _ header.Store[*types.SignedHeader] = (*HeaderStoreAdapter)(nil) + +// NewHeaderStoreAdapter creates a new HeaderStoreAdapter wrapping the given store. +func NewHeaderStoreAdapter(store Store) *HeaderStoreAdapter { + adapter := &HeaderStoreAdapter{ + store: store, + } + + // Initialize height from store + if h, err := store.Height(context.Background()); err == nil && h > 0 { + adapter.height.Store(h) + adapter.initialized = true + } + + return adapter +} + +// Start implements header.Store. It initializes the adapter if needed. 
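+// It refreshes the cached height from the underlying ev-node store.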
+func (a *HeaderStoreAdapter) Start(ctx context.Context) error { + a.mu.Lock() + defer a.mu.Unlock() + + // Refresh height from store + h, err := a.store.Height(ctx) + if err != nil { + return err + } + + if h > 0 { + a.height.Store(h) + a.initialized = true + } + + return nil +} + +// Stop implements header.Store. No-op since the underlying store lifecycle +// is managed separately. +func (a *HeaderStoreAdapter) Stop(ctx context.Context) error { + return nil +} + +// Head returns the highest header in the store. +func (a *HeaderStoreAdapter) Head(ctx context.Context, _ ...header.HeadOption[*types.SignedHeader]) (*types.SignedHeader, error) { + height := a.height.Load() + if height == 0 { + // Try to refresh from store + h, err := a.store.Height(ctx) + if err != nil { + return nil, header.ErrNotFound + } + if h == 0 { + return nil, header.ErrNotFound + } + a.height.Store(h) + height = h + } + + hdr, err := a.store.GetHeader(ctx, height) + if err != nil { + return nil, header.ErrNotFound + } + + return hdr, nil +} + +// Tail returns the lowest header in the store. +// For ev-node, this is typically the genesis/initial height. +func (a *HeaderStoreAdapter) Tail(ctx context.Context) (*types.SignedHeader, error) { + // Start from height 1 and find the first available header + // This is a simple implementation; could be optimized with metadata + height := a.height.Load() + if height == 0 { + return nil, header.ErrNotFound + } + + // Try height 1 first (most common case) + hdr, err := a.store.GetHeader(ctx, 1) + if err == nil { + return hdr, nil + } + + // Linear scan from 1 to current height to find first header + for h := uint64(2); h <= height; h++ { + hdr, err = a.store.GetHeader(ctx, h) + if err == nil { + return hdr, nil + } + } + + return nil, header.ErrNotFound +} + +// Get returns a header by its hash. +func (a *HeaderStoreAdapter) Get(ctx context.Context, hash header.Hash) (*types.SignedHeader, error) { + hdr, _, err := a.store.GetBlockByHash(ctx, hash) + if err != nil { + return nil, header.ErrNotFound + } + return hdr, nil +} + +// GetByHeight returns a header at the given height. +func (a *HeaderStoreAdapter) GetByHeight(ctx context.Context, height uint64) (*types.SignedHeader, error) { + hdr, err := a.store.GetHeader(ctx, height) + if err != nil { + return nil, header.ErrNotFound + } + return hdr, nil +} + +// GetRangeByHeight returns headers in the range [from.Height()+1, to). +// This follows go-header's convention where 'from' is the trusted header +// and we return headers starting from the next height. +func (a *HeaderStoreAdapter) GetRangeByHeight(ctx context.Context, from *types.SignedHeader, to uint64) ([]*types.SignedHeader, error) { + if from == nil { + return nil, errors.New("from header cannot be nil") + } + + startHeight := from.Height() + 1 + if startHeight >= to { + return nil, nil + } + + return a.GetRange(ctx, startHeight, to) +} + +// GetRange returns headers in the range [from, to). +func (a *HeaderStoreAdapter) GetRange(ctx context.Context, from, to uint64) ([]*types.SignedHeader, error) { + if from >= to { + return nil, nil + } + + headers := make([]*types.SignedHeader, 0, to-from) + for height := from; height < to; height++ { + hdr, err := a.store.GetHeader(ctx, height) + if err != nil { + // Return what we have so far + if len(headers) > 0 { + return headers, nil + } + return nil, header.ErrNotFound + } + headers = append(headers, hdr) + } + + return headers, nil +} + +// Has checks if a header with the given hash exists. 
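+// The hash is the store's index hash (sha256 of the marshaled SignedHeader), not the header hash.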
+func (a *HeaderStoreAdapter) Has(ctx context.Context, hash header.Hash) (bool, error) { + _, _, err := a.store.GetBlockByHash(ctx, hash) + if err != nil { + return false, nil + } + return true, nil +} + +// HasAt checks if a header exists at the given height. +func (a *HeaderStoreAdapter) HasAt(ctx context.Context, height uint64) bool { + _, err := a.store.GetHeader(ctx, height) + return err == nil +} + +// Height returns the current height of the store. +func (a *HeaderStoreAdapter) Height() uint64 { + height := a.height.Load() + if height == 0 { + // Try to refresh from store + if h, err := a.store.Height(context.Background()); err == nil { + a.height.Store(h) + return h + } + } + return height +} + +// Append stores headers in the store. +// This method is called by go-header's P2P infrastructure when headers are received. +// We save the headers to the ev-node store to ensure they're available for the syncer. +func (a *HeaderStoreAdapter) Append(ctx context.Context, headers ...*types.SignedHeader) error { + if len(headers) == 0 { + return nil + } + + for _, hdr := range headers { + if hdr == nil || hdr.IsZero() { + continue + } + + // Check if we already have this header + if a.HasAt(ctx, hdr.Height()) { + continue + } + + // Create a batch and save the header + // Note: We create empty data since we only have the header at this point. + // The full block data will be saved by the syncer when processing from DA. + batch, err := a.store.NewBatch(ctx) + if err != nil { + return fmt.Errorf("failed to create batch for header at height %d: %w", hdr.Height(), err) + } + + // Save header with empty data and signature + // The syncer will overwrite this with complete block data when it processes from DA + emptyData := &types.Data{ + Metadata: &types.Metadata{ + ChainID: hdr.ChainID(), + Height: hdr.Height(), + Time: uint64(hdr.Time().UnixNano()), + LastDataHash: hdr.LastHeader(), + }, + Txs: nil, + } + + if err := batch.SaveBlockData(hdr, emptyData, &hdr.Signature); err != nil { + return fmt.Errorf("failed to save header at height %d: %w", hdr.Height(), err) + } + + if err := batch.SetHeight(hdr.Height()); err != nil { + return fmt.Errorf("failed to set height for header at height %d: %w", hdr.Height(), err) + } + + if err := batch.Commit(); err != nil { + return fmt.Errorf("failed to commit header at height %d: %w", hdr.Height(), err) + } + + // Update cached height + if hdr.Height() > a.height.Load() { + a.height.Store(hdr.Height()) + } + } + + return nil +} + +// Init initializes the store with the first header. +// This is called by go-header when bootstrapping the store with a trusted header. +func (a *HeaderStoreAdapter) Init(ctx context.Context, h *types.SignedHeader) error { + a.mu.Lock() + defer a.mu.Unlock() + + if a.initialized { + return nil + } + + if h == nil || h.IsZero() { + return nil + } + + // Use Append to save the header + a.mu.Unlock() // Unlock before calling Append to avoid deadlock + err := a.Append(ctx, h) + a.mu.Lock() // Re-lock for the initialized flag update + + if err != nil { + return err + } + + a.initialized = true + return nil +} + +// Sync ensures all pending writes are flushed. +// Delegates to the underlying store's sync if available. +func (a *HeaderStoreAdapter) Sync(ctx context.Context) error { + // The underlying store handles its own syncing + return nil +} + +// DeleteRange deletes headers in the range [from, to). +// This is used for rollback operations. 
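+// Deleting the underlying block data is delegated to the optional OnDelete callback;
+// the adapter itself only adjusts its cached height.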
+func (a *HeaderStoreAdapter) DeleteRange(ctx context.Context, from, to uint64) error { + // Rollback is handled by the ev-node store's Rollback method + // This is called during store cleanup operations + if a.onDeleteFn != nil { + for height := from; height < to; height++ { + if err := a.onDeleteFn(ctx, height); err != nil { + return err + } + } + } + + // Update cached height if necessary + if from <= a.height.Load() { + a.height.Store(from - 1) + } + + return nil +} + +// OnDelete registers a callback to be invoked when headers are deleted. +func (a *HeaderStoreAdapter) OnDelete(fn func(context.Context, uint64) error) { + a.onDeleteFn = fn +} + +// RefreshHeight updates the cached height from the underlying store. +// This should be called after the syncer processes a new block. +func (a *HeaderStoreAdapter) RefreshHeight(ctx context.Context) error { + h, err := a.store.Height(ctx) + if err != nil { + return err + } + a.height.Store(h) + return nil +} + +// SetHeight updates the cached height. +// This is useful when the syncer knows the new height after processing a block. +func (a *HeaderStoreAdapter) SetHeight(height uint64) { + a.height.Store(height) +} diff --git a/pkg/store/header_store_adapter_test.go b/pkg/store/header_store_adapter_test.go new file mode 100644 index 0000000000..2b6bff84bd --- /dev/null +++ b/pkg/store/header_store_adapter_test.go @@ -0,0 +1,474 @@ +package store + +import ( + "context" + "crypto/sha256" + "testing" + "time" + + "github.com/celestiaorg/go-header" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/types" +) + +// computeHeaderIndexHash computes the hash used for indexing in the store. +// The store indexes by sha256(signedHeader.MarshalBinary()), not signedHeader.Hash(). 
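+// Tests use it to derive the lookup hash expected by the adapter's Get and Has methods.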
+func computeHeaderIndexHash(h *types.SignedHeader) []byte { + blob, _ := h.MarshalBinary() + hash := sha256.Sum256(blob) + return hash[:] +} + +func TestHeaderStoreAdapter_NewHeaderStoreAdapter(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + + adapter := NewHeaderStoreAdapter(store) + require.NotNil(t, adapter) + + // Initially, height should be 0 + assert.Equal(t, uint64(0), adapter.Height()) + + // Head should return ErrNotFound when empty + _, err = adapter.Head(ctx) + assert.ErrorIs(t, err, header.ErrNotFound) +} + +func TestHeaderStoreAdapter_AppendAndRetrieve(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + // Create test headers + h1, _ := types.GetRandomBlock(1, 2, "test-chain") + h2, _ := types.GetRandomBlock(2, 2, "test-chain") + + // Append headers + err = adapter.Append(ctx, h1, h2) + require.NoError(t, err) + + // Check height is updated + assert.Equal(t, uint64(2), adapter.Height()) + + // Retrieve by height + retrieved, err := adapter.GetByHeight(ctx, 1) + require.NoError(t, err) + assert.Equal(t, h1.Height(), retrieved.Height()) + + retrieved, err = adapter.GetByHeight(ctx, 2) + require.NoError(t, err) + assert.Equal(t, h2.Height(), retrieved.Height()) + + // Head should return the latest + head, err := adapter.Head(ctx) + require.NoError(t, err) + assert.Equal(t, uint64(2), head.Height()) +} + +func TestHeaderStoreAdapter_Get(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + h1, _ := types.GetRandomBlock(1, 2, "test-chain") + require.NoError(t, adapter.Append(ctx, h1)) + + // Get by hash - need to use the index hash (sha256 of marshaled SignedHeader) + hash := computeHeaderIndexHash(h1) + retrieved, err := adapter.Get(ctx, hash) + require.NoError(t, err) + assert.Equal(t, h1.Height(), retrieved.Height()) + + // Get non-existent hash + _, err = adapter.Get(ctx, []byte("nonexistent")) + assert.ErrorIs(t, err, header.ErrNotFound) +} + +func TestHeaderStoreAdapter_Has(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + h1, _ := types.GetRandomBlock(1, 2, "test-chain") + require.NoError(t, adapter.Append(ctx, h1)) + + // Has should return true for existing header - use index hash + has, err := adapter.Has(ctx, computeHeaderIndexHash(h1)) + require.NoError(t, err) + assert.True(t, has) + + // Has should return false for non-existent + has, err = adapter.Has(ctx, []byte("nonexistent")) + require.NoError(t, err) + assert.False(t, has) +} + +func TestHeaderStoreAdapter_HasAt(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + h1, _ := types.GetRandomBlock(1, 2, "test-chain") + require.NoError(t, adapter.Append(ctx, h1)) + + // HasAt should return true for existing height + assert.True(t, adapter.HasAt(ctx, 1)) + + // HasAt should return false for non-existent height + assert.False(t, adapter.HasAt(ctx, 2)) +} + +func TestHeaderStoreAdapter_GetRange(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := 
NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + // Create and append multiple headers + h1, _ := types.GetRandomBlock(1, 1, "test-chain") + h2, _ := types.GetRandomBlock(2, 1, "test-chain") + h3, _ := types.GetRandomBlock(3, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, h1, h2, h3)) + + // GetRange [1, 3) should return headers 1 and 2 + headers, err := adapter.GetRange(ctx, 1, 3) + require.NoError(t, err) + require.Len(t, headers, 2) + assert.Equal(t, uint64(1), headers[0].Height()) + assert.Equal(t, uint64(2), headers[1].Height()) + + // GetRange with from >= to should return nil + headers, err = adapter.GetRange(ctx, 3, 3) + require.NoError(t, err) + assert.Nil(t, headers) +} + +func TestHeaderStoreAdapter_GetRangeByHeight(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + h1, _ := types.GetRandomBlock(1, 1, "test-chain") + h2, _ := types.GetRandomBlock(2, 1, "test-chain") + h3, _ := types.GetRandomBlock(3, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, h1, h2, h3)) + + // GetRangeByHeight from h1 to 4 should return headers 2 and 3 + headers, err := adapter.GetRangeByHeight(ctx, h1, 4) + require.NoError(t, err) + require.Len(t, headers, 2) + assert.Equal(t, uint64(2), headers[0].Height()) + assert.Equal(t, uint64(3), headers[1].Height()) +} + +func TestHeaderStoreAdapter_Init(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + h1, _ := types.GetRandomBlock(1, 1, "test-chain") + + // Init should save the header + err = adapter.Init(ctx, h1) + require.NoError(t, err) + + // Verify it's stored + retrieved, err := adapter.GetByHeight(ctx, 1) + require.NoError(t, err) + assert.Equal(t, h1.Height(), retrieved.Height()) + + // Init again should be a no-op (already initialized) + h2, _ := types.GetRandomBlock(2, 1, "test-chain") + err = adapter.Init(ctx, h2) + require.NoError(t, err) + + // Height 2 should not be stored since Init was already done + assert.False(t, adapter.HasAt(ctx, 2)) +} + +func TestHeaderStoreAdapter_Tail(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + // Tail on empty store should return ErrNotFound + _, err = adapter.Tail(ctx) + assert.ErrorIs(t, err, header.ErrNotFound) + + h1, _ := types.GetRandomBlock(1, 1, "test-chain") + h2, _ := types.GetRandomBlock(2, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, h1, h2)) + + // Tail should return the first header + tail, err := adapter.Tail(ctx) + require.NoError(t, err) + assert.Equal(t, uint64(1), tail.Height()) +} + +func TestHeaderStoreAdapter_StartStop(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + // Start should not error + err = adapter.Start(ctx) + require.NoError(t, err) + + // Stop should not error + err = adapter.Stop(ctx) + require.NoError(t, err) +} + +func TestHeaderStoreAdapter_DeleteRange(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := 
NewHeaderStoreAdapter(store) + + h1, _ := types.GetRandomBlock(1, 1, "test-chain") + h2, _ := types.GetRandomBlock(2, 1, "test-chain") + h3, _ := types.GetRandomBlock(3, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, h1, h2, h3)) + + assert.Equal(t, uint64(3), adapter.Height()) + + // DeleteRange should update cached height + err = adapter.DeleteRange(ctx, 2, 4) + require.NoError(t, err) + + // Cached height should be updated to 1 + assert.Equal(t, uint64(1), adapter.Height()) +} + +func TestHeaderStoreAdapter_OnDelete(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + h1, _ := types.GetRandomBlock(1, 1, "test-chain") + h2, _ := types.GetRandomBlock(2, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, h1, h2)) + + // Track deleted heights + var deletedHeights []uint64 + adapter.OnDelete(func(ctx context.Context, height uint64) error { + deletedHeights = append(deletedHeights, height) + return nil + }) + + err = adapter.DeleteRange(ctx, 1, 3) + require.NoError(t, err) + + assert.Equal(t, []uint64{1, 2}, deletedHeights) +} + +func TestHeaderStoreAdapter_RefreshHeight(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + // Save a block directly to the underlying store + h1, d1 := types.GetRandomBlock(1, 1, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &types.Signature{})) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + // Adapter height may be stale + // RefreshHeight should update it + err = adapter.RefreshHeight(ctx) + require.NoError(t, err) + assert.Equal(t, uint64(1), adapter.Height()) +} + +func TestHeaderStoreAdapter_SetHeight(t *testing.T) { + t.Parallel() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + adapter.SetHeight(42) + assert.Equal(t, uint64(42), adapter.Height()) +} + +func TestHeaderStoreAdapter_AppendSkipsExisting(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + h1, _ := types.GetRandomBlock(1, 2, "test-chain") + require.NoError(t, adapter.Append(ctx, h1)) + + // Append the same header again should not error (skips existing) + err = adapter.Append(ctx, h1) + require.NoError(t, err) + + // Height should still be 1 + assert.Equal(t, uint64(1), adapter.Height()) +} + +func TestHeaderStoreAdapter_AppendNilHeaders(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + // Append with nil and empty should not error + err = adapter.Append(ctx) + require.NoError(t, err) + + var nilHeader *types.SignedHeader + err = adapter.Append(ctx, nilHeader) + require.NoError(t, err) + + assert.Equal(t, uint64(0), adapter.Height()) +} + +func TestHeaderStoreAdapter_Sync(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + // Sync should not error + err = adapter.Sync(ctx) + require.NoError(t, err) 
+} + +func TestHeaderStoreAdapter_HeightRefreshFromStore(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + + // Save data directly to store before creating adapter + h1, d1 := types.GetRandomBlock(1, 1, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &types.Signature{})) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + // Create adapter - it should pick up the height from store + adapter := NewHeaderStoreAdapter(store) + assert.Equal(t, uint64(1), adapter.Height()) +} + +func TestHeaderStoreAdapter_GetByHeightNotFound(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + _, err = adapter.GetByHeight(ctx, 999) + assert.ErrorIs(t, err, header.ErrNotFound) +} + +func TestHeaderStoreAdapter_InitWithNil(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + // Init with nil should not error but also not mark as initialized + err = adapter.Init(ctx, nil) + require.NoError(t, err) + + // Should still return ErrNotFound + _, err = adapter.Head(ctx) + assert.ErrorIs(t, err, header.ErrNotFound) +} + +func TestHeaderStoreAdapter_ContextTimeout(t *testing.T) { + t.Parallel() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + // Create a context that's already canceled + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + defer cancel() + time.Sleep(1 * time.Millisecond) // Ensure context is expired + + // Operations should still work with in-memory store + // but this tests the context is being passed through + h1, _ := types.GetRandomBlock(1, 1, "test-chain") + // Note: In-memory store doesn't actually check context, but this verifies + // the adapter passes the context through + _ = adapter.Append(ctx, h1) +} diff --git a/pkg/sync/sync_service.go b/pkg/sync/sync_service.go index 82a2ac0422..be72e97179 100644 --- a/pkg/sync/sync_service.go +++ b/pkg/sync/sync_service.go @@ -10,9 +10,7 @@ import ( "github.com/celestiaorg/go-header" goheaderp2p "github.com/celestiaorg/go-header/p2p" - goheaderstore "github.com/celestiaorg/go-header/store" goheadersync "github.com/celestiaorg/go-header/sync" - ds "github.com/ipfs/go-datastore" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" @@ -52,7 +50,7 @@ type SyncService[H header.Header[H]] struct { ex *exchangeWrapper[H] sub *goheaderp2p.Subscriber[H] p2pServer *goheaderp2p.ExchangeServer[H] - store *goheaderstore.Store[H] + store header.Store[H] syncer *goheadersync.Syncer[H] syncerStatus *SyncerStatus topicSubscription header.Subscription[H] @@ -71,8 +69,7 @@ type HeaderSyncService = SyncService[*types.SignedHeader] // NewDataSyncService returns a new DataSyncService. 
func NewDataSyncService( - batchingDataStore ds.Batching, - daStore store.Store, + evStore store.Store, conf config.Config, genesis genesis.Genesis, p2p *p2p.Client, @@ -82,29 +79,30 @@ func NewDataSyncService( var getterByHeight GetterByHeightFunc[*types.Data] var rangeGetter RangeGetterFunc[*types.Data] - if daStore != nil { + if evStore != nil { getter = func(ctx context.Context, hash header.Hash) (*types.Data, error) { - _, d, err := daStore.GetBlockByHash(ctx, hash) + _, d, err := evStore.GetBlockByHash(ctx, hash) return d, err } getterByHeight = func(ctx context.Context, height uint64) (*types.Data, error) { - _, d, err := daStore.GetBlockData(ctx, height) + _, d, err := evStore.GetBlockData(ctx, height) return d, err } rangeGetter = func(ctx context.Context, from, to uint64) ([]*types.Data, uint64, error) { return getContiguousRange(ctx, from, to, func(ctx context.Context, h uint64) (*types.Data, error) { - _, d, err := daStore.GetBlockData(ctx, h) + _, d, err := evStore.GetBlockData(ctx, h) return d, err }) } } - return newSyncService[*types.Data](batchingDataStore, getter, getterByHeight, rangeGetter, dataSync, conf, genesis, p2p, logger) + + storeAdapter := store.NewDataStoreAdapter(evStore) + return newSyncService[*types.Data](storeAdapter, getter, getterByHeight, rangeGetter, dataSync, conf, genesis, p2p, logger) } // NewHeaderSyncService returns a new HeaderSyncService. func NewHeaderSyncService( - dsStore ds.Batching, - daStore store.Store, + evStore store.Store, conf config.Config, genesis genesis.Genesis, p2p *p2p.Client, @@ -114,23 +112,25 @@ func NewHeaderSyncService( var getterByHeight GetterByHeightFunc[*types.SignedHeader] var rangeGetter RangeGetterFunc[*types.SignedHeader] - if daStore != nil { + if evStore != nil { getter = func(ctx context.Context, hash header.Hash) (*types.SignedHeader, error) { - h, _, err := daStore.GetBlockByHash(ctx, hash) + h, _, err := evStore.GetBlockByHash(ctx, hash) return h, err } getterByHeight = func(ctx context.Context, height uint64) (*types.SignedHeader, error) { - return daStore.GetHeader(ctx, height) + return evStore.GetHeader(ctx, height) } rangeGetter = func(ctx context.Context, from, to uint64) ([]*types.SignedHeader, uint64, error) { - return getContiguousRange(ctx, from, to, daStore.GetHeader) + return getContiguousRange(ctx, from, to, evStore.GetHeader) } } - return newSyncService[*types.SignedHeader](dsStore, getter, getterByHeight, rangeGetter, headerSync, conf, genesis, p2p, logger) + + storeAdapter := store.NewHeaderStoreAdapter(evStore) + return newSyncService[*types.SignedHeader](storeAdapter, getter, getterByHeight, rangeGetter, headerSync, conf, genesis, p2p, logger) } func newSyncService[H header.Header[H]]( - dsStore ds.Batching, + storeAdapter header.Store[H], getter GetterFunc[H], getterByHeight GetterByHeightFunc[H], rangeGetter RangeGetterFunc[H], @@ -144,20 +144,11 @@ func newSyncService[H header.Header[H]]( return nil, errors.New("p2p client cannot be nil") } - ss, err := goheaderstore.NewStore[H]( - dsStore, - goheaderstore.WithStorePrefix(string(syncType)), - goheaderstore.WithMetrics(), - ) - if err != nil { - return nil, fmt.Errorf("failed to initialize the %s store: %w", syncType, err) - } - svc := &SyncService[H]{ conf: conf, genesis: genesis, p2p: p2p, - store: ss, + store: storeAdapter, getter: getter, getterByHeight: getterByHeight, rangeGetter: rangeGetter, @@ -197,8 +188,9 @@ func (syncService *SyncService[H]) Store() header.Store[H] { return syncService.store } -// WriteToStoreAndBroadcast 
initializes store if needed and broadcasts provided header or block. -// Note: Only returns an error in case store can't be initialized. Logs error if there's one while broadcasting. +// WriteToStoreAndBroadcast broadcasts provided header or block to P2P network. +// Note: With the store adapter approach, actual storage is handled by the syncer +// writing to the ev-node store. This method primarily handles P2P broadcasting. func (syncService *SyncService[H]) WriteToStoreAndBroadcast(ctx context.Context, headerOrData H, opts ...pubsub.PubOpt) error { if syncService.genesis.InitialHeight == 0 { return fmt.Errorf("invalid initial height; cannot be zero") @@ -308,8 +300,11 @@ func (syncService *SyncService[H]) initStore(ctx context.Context, initial H) (bo return false, err } - if err := syncService.store.Sync(ctx); err != nil { - return false, err + // Sync is optional for adapters - they may not need explicit syncing + if syncer, ok := syncService.store.(interface{ Sync(context.Context) error }); ok { + if err := syncer.Sync(ctx); err != nil { + return false, err + } } return true, nil @@ -342,8 +337,11 @@ func (syncService *SyncService[H]) setupP2PInfrastructure(ctx context.Context) ( return nil, err } - if err := syncService.store.Start(ctx); err != nil { - return nil, fmt.Errorf("error while starting store: %w", err) + // Start the store adapter if it has a Start method + if starter, ok := syncService.store.(interface{ Start(context.Context) error }); ok { + if err := starter.Start(ctx); err != nil { + return nil, fmt.Errorf("error while starting store: %w", err) + } } if syncService.p2pServer, err = newP2PServer(syncService.p2p.Host(), syncService.store, networkID); err != nil { @@ -480,14 +478,17 @@ func (syncService *SyncService[H]) Stop(ctx context.Context) error { if syncService.syncerStatus.isStarted() { err = errors.Join(err, syncService.syncer.Stop(ctx)) } - err = errors.Join(err, syncService.store.Stop(ctx)) + // Stop the store adapter if it has a Stop method + if stopper, ok := syncService.store.(interface{ Stop(context.Context) error }); ok { + err = errors.Join(err, stopper.Stop(ctx)) + } return err } // newP2PServer constructs a new ExchangeServer using the given Network as a protocolID suffix. 
func newP2PServer[H header.Header[H]]( host host.Host, - store *goheaderstore.Store[H], + store header.Store[H], network string, opts ...goheaderp2p.Option[goheaderp2p.ServerParameters], ) (*goheaderp2p.ExchangeServer[H], error) { diff --git a/pkg/sync/sync_service_test.go b/pkg/sync/sync_service_test.go index cd434bc7b9..b525f17ffe 100644 --- a/pkg/sync/sync_service_test.go +++ b/pkg/sync/sync_service_test.go @@ -61,7 +61,7 @@ func TestHeaderSyncServiceRestart(t *testing.T) { require.NoError(t, p2pClient.Start(ctx)) rktStore := store.New(mainKV) - svc, err := NewHeaderSyncService(mainKV, rktStore, conf, genesisDoc, p2pClient, logger) + svc, err := NewHeaderSyncService(rktStore, conf, genesisDoc, p2pClient, logger) require.NoError(t, err) err = svc.Start(ctx) require.NoError(t, err) @@ -101,7 +101,7 @@ func TestHeaderSyncServiceRestart(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { _ = p2pClient.Close() }) - svc, err = NewHeaderSyncService(mainKV, rktStore, conf, genesisDoc, p2pClient, logger) + svc, err = NewHeaderSyncService(rktStore, conf, genesisDoc, p2pClient, logger) require.NoError(t, err) err = svc.Start(ctx) require.NoError(t, err) @@ -153,7 +153,7 @@ func TestHeaderSyncServiceInitFromHigherHeight(t *testing.T) { t.Cleanup(func() { _ = p2pClient.Close() }) rktStore := store.New(mainKV) - svc, err := NewHeaderSyncService(mainKV, rktStore, conf, genesisDoc, p2pClient, logger) + svc, err := NewHeaderSyncService(rktStore, conf, genesisDoc, p2pClient, logger) require.NoError(t, err) require.NoError(t, svc.Start(ctx)) t.Cleanup(func() { _ = svc.Stop(context.Background()) }) From ebae42f33571d2c72165f53be659d63f7f1133a2 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 30 Jan 2026 13:32:02 +0100 Subject: [PATCH 02/21] simplify --- pkg/store/README.md | 90 --------------------------------------------- 1 file changed, 90 deletions(-) diff --git a/pkg/store/README.md b/pkg/store/README.md index b5f58b36d0..49f4e878ff 100644 --- a/pkg/store/README.md +++ b/pkg/store/README.md @@ -180,93 +180,3 @@ err = batch.Commit(ctx) ## Store Adapters for P2P Integration The store package provides adapter implementations that wrap the ev-node store to satisfy the `header.Store[H]` interface from the `go-header` library. This enables the ev-node store to be used directly by go-header's P2P infrastructure, eliminating data duplication. - -### Background - -Previously, ev-node maintained redundant storage: - -1. **ev-node store** - Primary store for headers, data, state, and metadata -2. **go-header stores** - Separate stores for P2P sync (headerSync and dataSync prefixes) - -This resulted in the same headers and data being stored multiple times. - -### Solution: Store Adapters - -Two adapter types wrap the ev-node store: - -- **`HeaderStoreAdapter`** - Implements `header.Store[*types.SignedHeader]` -- **`DataStoreAdapter`** - Implements `header.Store[*types.Data]` - -These adapters: - -- Read from the single ev-node store -- Write to the ev-node store when receiving data via P2P -- Provide the interface required by go-header's exchange server, syncer, and subscriber - -### Usage - -```go -// Create the ev-node store -evStore := store.New(kvStore) - -// Create adapters for P2P infrastructure -headerAdapter := store.NewHeaderStoreAdapter(evStore) -dataAdapter := store.NewDataStoreAdapter(evStore) - -// Use with go-header P2P components -exchangeServer, _ := goheaderp2p.NewExchangeServer(host, headerAdapter, opts...) -``` - -### Benefits - -1. 
**Single source of truth** - All block data lives in one place -2. **Reduced disk usage** - No duplicate storage of headers and data -3. **Simplified rollback** - Rolling back the ev-node store automatically affects P2P sync -4. **Atomic operations** - Block storage and state updates remain atomic - -### Architecture - -```mermaid -classDiagram - class Store { - <> - +GetHeader(height) SignedHeader - +GetBlockData(height) (SignedHeader, Data) - +GetBlockByHash(hash) (SignedHeader, Data) - +NewBatch() Batch - } - - class HeaderStoreAdapter { - -store Store - -height atomic.Uint64 - +Head() SignedHeader - +GetByHeight(height) SignedHeader - +Get(hash) SignedHeader - +Append(headers...) error - +Height() uint64 - } - - class DataStoreAdapter { - -store Store - -height atomic.Uint64 - +Head() Data - +GetByHeight(height) Data - +Get(hash) Data - +Append(data...) error - +Height() uint64 - } - - class GoHeaderStore { - <> - +Head() H - +Get(hash) H - +GetByHeight(height) H - +Append(items...) error - +Height() uint64 - } - - Store <-- HeaderStoreAdapter : wraps - Store <-- DataStoreAdapter : wraps - GoHeaderStore <|.. HeaderStoreAdapter : implements - GoHeaderStore <|.. DataStoreAdapter : implements -``` From d56115d3a80a4befa27683665f35150e1be4e7dd Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 30 Jan 2026 14:00:47 +0100 Subject: [PATCH 03/21] go mod tidy --- apps/evm/go.mod | 2 +- apps/testapp/go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/evm/go.mod b/apps/evm/go.mod index a7bf96bd17..8bf4767764 100644 --- a/apps/evm/go.mod +++ b/apps/evm/go.mod @@ -8,7 +8,6 @@ replace ( ) require ( - github.com/celestiaorg/go-header v0.8.1 github.com/ethereum/go-ethereum v1.16.8 github.com/evstack/ev-node v1.0.0-rc.1 github.com/evstack/ev-node/core v1.0.0-rc.1 @@ -30,6 +29,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/boltdb/bolt v1.3.1 // indirect + github.com/celestiaorg/go-header v0.8.1 // indirect github.com/celestiaorg/go-libp2p-messenger v0.2.2 // indirect github.com/celestiaorg/go-square/merkle v0.0.0-20240627094109-7d01436067a3 // indirect github.com/celestiaorg/go-square/v3 v3.0.2 // indirect diff --git a/apps/testapp/go.mod b/apps/testapp/go.mod index 687e77801a..07fc9f3a74 100644 --- a/apps/testapp/go.mod +++ b/apps/testapp/go.mod @@ -3,7 +3,6 @@ module github.com/evstack/ev-node/apps/testapp go 1.25.0 require ( - github.com/celestiaorg/go-header v0.8.1 github.com/evstack/ev-node v1.0.0-rc.1 github.com/evstack/ev-node/core v1.0.0-rc.1 github.com/ipfs/go-datastore v0.9.0 @@ -19,6 +18,7 @@ require ( github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/boltdb/bolt v1.3.1 // indirect + github.com/celestiaorg/go-header v0.8.1 // indirect github.com/celestiaorg/go-libp2p-messenger v0.2.2 // indirect github.com/celestiaorg/go-square/merkle v0.0.0-20240627094109-7d01436067a3 // indirect github.com/celestiaorg/go-square/v3 v3.0.2 // indirect From 1e26328e8854814cc8775b07f6da51b50bc65ef0 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 30 Jan 2026 14:21:51 +0100 Subject: [PATCH 04/21] append in cache instead of duplicate store --- pkg/store/data_store_adapter.go | 257 +++++++++++++++-------- pkg/store/data_store_adapter_test.go | 191 +++++++++++++++-- pkg/store/header_store_adapter.go | 276 +++++++++++++++++-------- pkg/store/header_store_adapter_test.go | 216 ++++++++++++++++--- 4 files changed, 727 insertions(+), 213 deletions(-) 
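Before the file-by-file diff, a minimal sketch of the pending-cache pattern this commit introduces: items received over P2P are parked in a bounded LRU keyed by height, while reads consult the persistent store first and only fall back to the cache. The `item` type, the `persisted` map, and the helper names below are illustrative stand-ins rather than the actual ev-node `Store` API; only the `github.com/hashicorp/golang-lru/v2` usage mirrors the adapters in the patch.

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

// item stands in for a header or data blob; the real adapters use
// *types.SignedHeader and *types.Data.
type item struct {
	Height uint64
}

// adapter sketches the read-through pattern: persisted blocks win,
// otherwise fall back to the bounded pending cache holding items
// received over P2P but not yet processed by the ev-node syncer.
type adapter struct {
	persisted map[uint64]*item          // stand-in for the ev-node store
	pending   *lru.Cache[uint64, *item] // bounded cache for P2P items
}

func newAdapter(size int) *adapter {
	cache, _ := lru.New[uint64, *item](size) // size is constant and valid
	return &adapter{persisted: map[uint64]*item{}, pending: cache}
}

// Append mirrors the new behavior: skip heights already persisted and
// park everything else in the pending cache instead of writing batches.
func (a *adapter) Append(items ...*item) {
	for _, it := range items {
		if it == nil {
			continue
		}
		if _, ok := a.persisted[it.Height]; ok {
			continue // already in the store, nothing to cache
		}
		a.pending.Add(it.Height, it)
	}
}

// GetByHeight checks the store first, then the pending cache.
func (a *adapter) GetByHeight(height uint64) (*item, bool) {
	if it, ok := a.persisted[height]; ok {
		return it, true
	}
	return a.pending.Peek(height)
}

func main() {
	a := newAdapter(1000)
	a.Append(&item{Height: 1}, &item{Height: 2})

	a.persisted[1] = &item{Height: 1} // syncer later persists height 1

	for _, h := range []uint64{1, 2, 3} {
		it, ok := a.GetByHeight(h)
		fmt.Println(h, ok, it != nil)
	}
}
```

Once the syncer persists a height, the cached copy becomes redundant; the real adapters prune it via `RefreshHeight`/`SetHeight`, and the LRU bound (1000 entries here, matching `defaultPendingCacheSize`) keeps the cache from growing without limit during sync bursts.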
diff --git a/pkg/store/data_store_adapter.go b/pkg/store/data_store_adapter.go index 6f54d17d53..1dd63127a7 100644 --- a/pkg/store/data_store_adapter.go +++ b/pkg/store/data_store_adapter.go @@ -3,11 +3,11 @@ package store import ( "context" "errors" - "fmt" "sync" "sync/atomic" "github.com/celestiaorg/go-header" + lru "github.com/hashicorp/golang-lru/v2" "github.com/evstack/ev-node/types" ) @@ -15,6 +15,11 @@ import ( // DataStoreAdapter wraps Store to implement header.Store[*types.Data]. // This allows the ev-node store to be used directly by go-header's P2P infrastructure, // eliminating the need for a separate go-header store and reducing data duplication. +// +// The adapter maintains an in-memory cache for data received via P2P (through Append). +// This cache allows the go-header syncer and P2P handler to access data before it +// is validated and persisted by the ev-node syncer. Once the ev-node syncer processes +// a block, it writes to the underlying store, and subsequent reads will come from the store. type DataStoreAdapter struct { store Store @@ -26,6 +31,10 @@ type DataStoreAdapter struct { mu sync.RWMutex initialized bool + // pendingData is an LRU cache for data received via Append that hasn't been + // written to the store yet. Keyed by height. Using LRU prevents unbounded growth. + pendingData *lru.Cache[uint64, *types.Data] + // onDeleteFn is called when data is deleted (for rollback scenarios) onDeleteFn func(context.Context, uint64) error } @@ -35,8 +44,12 @@ var _ header.Store[*types.Data] = (*DataStoreAdapter)(nil) // NewDataStoreAdapter creates a new DataStoreAdapter wrapping the given store. func NewDataStoreAdapter(store Store) *DataStoreAdapter { + // Create LRU cache for pending data - ignore error as size is constant and valid + pendingCache, _ := lru.New[uint64, *types.Data](defaultPendingCacheSize) + adapter := &DataStoreAdapter{ - store: store, + store: store, + pendingData: pendingCache, } // Initialize height from store @@ -75,21 +88,50 @@ func (a *DataStoreAdapter) Stop(ctx context.Context) error { // Head returns the data for the highest block in the store. 
func (a *DataStoreAdapter) Head(ctx context.Context, _ ...header.HeadOption[*types.Data]) (*types.Data, error) { - height := a.height.Load() - if height == 0 { - // Try to refresh from store - h, err := a.store.Height(ctx) - if err != nil { + // First check the store height + storeHeight, err := a.store.Height(ctx) + if err != nil && storeHeight == 0 { + // Check pending data + if a.pendingData.Len() == 0 { return nil, header.ErrNotFound } - if h == 0 { - return nil, header.ErrNotFound + + // Find the highest pending data + var maxHeight uint64 + var head *types.Data + for _, h := range a.pendingData.Keys() { + if d, ok := a.pendingData.Peek(h); ok && h > maxHeight { + maxHeight = h + head = d + } } - a.height.Store(h) - height = h + if head != nil { + return head, nil + } + return nil, header.ErrNotFound } - _, data, err := a.store.GetBlockData(ctx, height) + // Check if we have higher pending data + var maxPending uint64 + var pendingHead *types.Data + for _, h := range a.pendingData.Keys() { + if d, ok := a.pendingData.Peek(h); ok && h > maxPending { + maxPending = h + pendingHead = d + } + } + + if maxPending > storeHeight && pendingHead != nil { + a.height.Store(maxPending) + return pendingHead, nil + } + + if storeHeight == 0 { + return nil, header.ErrNotFound + } + + a.height.Store(storeHeight) + _, data, err := a.store.GetBlockData(ctx, storeHeight) if err != nil { return nil, header.ErrNotFound } @@ -102,7 +144,12 @@ func (a *DataStoreAdapter) Head(ctx context.Context, _ ...header.HeadOption[*typ func (a *DataStoreAdapter) Tail(ctx context.Context) (*types.Data, error) { height := a.height.Load() if height == 0 { - return nil, header.ErrNotFound + // Check store + h, err := a.store.Height(ctx) + if err != nil || h == 0 { + return nil, header.ErrNotFound + } + height = h } // Try height 1 first (most common case) @@ -111,12 +158,20 @@ func (a *DataStoreAdapter) Tail(ctx context.Context) (*types.Data, error) { return data, nil } + // Check pending for height 1 + if pendingData, ok := a.pendingData.Peek(1); ok { + return pendingData, nil + } + // Linear scan from 1 to current height to find first data for h := uint64(2); h <= height; h++ { _, data, err = a.store.GetBlockData(ctx, h) if err == nil { return data, nil } + if pendingData, ok := a.pendingData.Peek(h); ok { + return pendingData, nil + } } return nil, header.ErrNotFound @@ -124,20 +179,36 @@ func (a *DataStoreAdapter) Tail(ctx context.Context) (*types.Data, error) { // Get returns data by its hash. func (a *DataStoreAdapter) Get(ctx context.Context, hash header.Hash) (*types.Data, error) { + // First try the store _, data, err := a.store.GetBlockByHash(ctx, hash) - if err != nil { - return nil, header.ErrNotFound + if err == nil { + return data, nil } - return data, nil + + // Check pending data - note: this checks data hash, not header hash + for _, h := range a.pendingData.Keys() { + if pendingData, ok := a.pendingData.Peek(h); ok && pendingData != nil && bytesEqual(pendingData.Hash(), hash) { + return pendingData, nil + } + } + + return nil, header.ErrNotFound } // GetByHeight returns data at the given height. 
func (a *DataStoreAdapter) GetByHeight(ctx context.Context, height uint64) (*types.Data, error) { + // First try the store _, data, err := a.store.GetBlockData(ctx, height) - if err != nil { - return nil, header.ErrNotFound + if err == nil { + return data, nil } - return data, nil + + // Check pending data + if pendingData, ok := a.pendingData.Peek(height); ok { + return pendingData, nil + } + + return nil, header.ErrNotFound } // GetRangeByHeight returns data in the range [from.Height()+1, to). @@ -164,7 +235,7 @@ func (a *DataStoreAdapter) GetRange(ctx context.Context, from, to uint64) ([]*ty dataList := make([]*types.Data, 0, to-from) for height := from; height < to; height++ { - _, data, err := a.store.GetBlockData(ctx, height) + data, err := a.GetByHeight(ctx, height) if err != nil { // Return what we have so far if len(dataList) > 0 { @@ -180,35 +251,71 @@ func (a *DataStoreAdapter) GetRange(ctx context.Context, from, to uint64) ([]*ty // Has checks if data with the given hash exists. func (a *DataStoreAdapter) Has(ctx context.Context, hash header.Hash) (bool, error) { + // Check store first _, _, err := a.store.GetBlockByHash(ctx, hash) - if err != nil { - return false, nil + if err == nil { + return true, nil + } + + // Check pending data + for _, h := range a.pendingData.Keys() { + if pendingData, ok := a.pendingData.Peek(h); ok && pendingData != nil && bytesEqual(pendingData.Hash(), hash) { + return true, nil + } } - return true, nil + + return false, nil } // HasAt checks if data exists at the given height. func (a *DataStoreAdapter) HasAt(ctx context.Context, height uint64) bool { + // Check store first _, _, err := a.store.GetBlockData(ctx, height) - return err == nil + if err == nil { + return true + } + + // Check pending data + return a.pendingData.Contains(height) } // Height returns the current height of the store. func (a *DataStoreAdapter) Height() uint64 { + // Check store first + if h, err := a.store.Height(context.Background()); err == nil && h > 0 { + // Also check pending for higher heights + maxPending := uint64(0) + for _, height := range a.pendingData.Keys() { + if height > maxPending { + maxPending = height + } + } + + if maxPending > h { + a.height.Store(maxPending) + return maxPending + } + a.height.Store(h) + return h + } + + // Fall back to cached height or check pending height := a.height.Load() - if height == 0 { - // Try to refresh from store - if h, err := a.store.Height(context.Background()); err == nil { - a.height.Store(h) - return h + if height > 0 { + return height + } + + for _, h := range a.pendingData.Keys() { + if h > height { + height = h } } return height } -// Append stores data in the store. -// This method is called by go-header's P2P infrastructure when data is received. -// We save the data to the ev-node store to ensure it's available for the syncer. +// Append stores data in the pending cache. +// This data is received via P2P and will be available for retrieval +// until the ev-node syncer processes and persists it to the store. func (a *DataStoreAdapter) Append(ctx context.Context, dataList ...*types.Data) error { if len(dataList) == 0 { return nil @@ -219,48 +326,20 @@ func (a *DataStoreAdapter) Append(ctx context.Context, dataList ...*types.Data) continue } - // Check if we already have this data - if a.HasAt(ctx, data.Height()) { - continue - } + height := data.Height() - // Create a batch and save the data - // Note: We create a minimal header since we only have the data at this point. 
- // The full block will be saved by the syncer when processing from DA. - batch, err := a.store.NewBatch(ctx) - if err != nil { - return fmt.Errorf("failed to create batch for data at height %d: %w", data.Height(), err) - } - - // Create a minimal header for the data - // The syncer will overwrite this with complete block data when it processes from DA - minimalHeader := &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - ChainID: data.ChainID(), - Height: data.Height(), - Time: uint64(data.Time().UnixNano()), - }, - LastHeaderHash: data.LastHeader(), - DataHash: data.DACommitment(), - }, - } - - if err := batch.SaveBlockData(minimalHeader, data, &types.Signature{}); err != nil { - return fmt.Errorf("failed to save data at height %d: %w", data.Height(), err) - } - - if err := batch.SetHeight(data.Height()); err != nil { - return fmt.Errorf("failed to set height for data at height %d: %w", data.Height(), err) + // Check if already in store + if _, _, err := a.store.GetBlockData(ctx, height); err == nil { + // Already persisted, skip + continue } - if err := batch.Commit(); err != nil { - return fmt.Errorf("failed to commit data at height %d: %w", data.Height(), err) - } + // Add to pending cache (LRU will evict oldest if full) + a.pendingData.Add(height, data) // Update cached height - if data.Height() > a.height.Load() { - a.height.Store(data.Height()) + if height > a.height.Load() { + a.height.Store(height) } } @@ -281,33 +360,28 @@ func (a *DataStoreAdapter) Init(ctx context.Context, d *types.Data) error { return nil } - // Use Append to save the data - a.mu.Unlock() // Unlock before calling Append to avoid deadlock - err := a.Append(ctx, d) - a.mu.Lock() // Re-lock for the initialized flag update - - if err != nil { - return err - } - + // Add to pending cache (LRU will evict oldest if full) + a.pendingData.Add(d.Height(), d) + a.height.Store(d.Height()) a.initialized = true + return nil } // Sync ensures all pending writes are flushed. -// Delegates to the underlying store's sync if available. +// No-op for the adapter as pending data is in-memory cache. func (a *DataStoreAdapter) Sync(ctx context.Context) error { - // The underlying store handles its own syncing return nil } // DeleteRange deletes data in the range [from, to). // This is used for rollback operations. func (a *DataStoreAdapter) DeleteRange(ctx context.Context, from, to uint64) error { - // Rollback is handled by the ev-node store's Rollback method - // This is called during store cleanup operations - if a.onDeleteFn != nil { - for height := from; height < to; height++ { + // Remove from pending cache + for height := from; height < to; height++ { + a.pendingData.Remove(height) + + if a.onDeleteFn != nil { if err := a.onDeleteFn(ctx, height); err != nil { return err } @@ -335,6 +409,14 @@ func (a *DataStoreAdapter) RefreshHeight(ctx context.Context) error { return err } a.height.Store(h) + + // Clean up pending data that is now in store + for _, height := range a.pendingData.Keys() { + if height <= h { + a.pendingData.Remove(height) + } + } + return nil } @@ -342,4 +424,11 @@ func (a *DataStoreAdapter) RefreshHeight(ctx context.Context) error { // This is useful when the syncer knows the new height after processing a block. 
func (a *DataStoreAdapter) SetHeight(height uint64) { a.height.Store(height) + + // Clean up pending data at or below this height + for _, h := range a.pendingData.Keys() { + if h <= height { + a.pendingData.Remove(h) + } + } } diff --git a/pkg/store/data_store_adapter_test.go b/pkg/store/data_store_adapter_test.go index 6d2fbb0c07..578c18b344 100644 --- a/pkg/store/data_store_adapter_test.go +++ b/pkg/store/data_store_adapter_test.go @@ -54,14 +54,14 @@ func TestDataStoreAdapter_AppendAndRetrieve(t *testing.T) { _, d1 := types.GetRandomBlock(1, 2, "test-chain") _, d2 := types.GetRandomBlock(2, 2, "test-chain") - // Append data + // Append data - these go to pending cache err = adapter.Append(ctx, d1, d2) require.NoError(t, err) - // Check height is updated + // Check height is updated (from pending) assert.Equal(t, uint64(2), adapter.Height()) - // Retrieve by height + // Retrieve by height (from pending) retrieved, err := adapter.GetByHeight(ctx, 1) require.NoError(t, err) assert.Equal(t, d1.Height(), retrieved.Height()) @@ -70,13 +70,13 @@ func TestDataStoreAdapter_AppendAndRetrieve(t *testing.T) { require.NoError(t, err) assert.Equal(t, d2.Height(), retrieved.Height()) - // Head should return the latest + // Head should return the latest (from pending) head, err := adapter.Head(ctx) require.NoError(t, err) assert.Equal(t, uint64(2), head.Height()) } -func TestDataStoreAdapter_Get(t *testing.T) { +func TestDataStoreAdapter_GetFromStore(t *testing.T) { t.Parallel() ctx := context.Background() @@ -147,7 +147,32 @@ func TestDataStoreAdapter_HasAt(t *testing.T) { _, d1 := types.GetRandomBlock(1, 2, "test-chain") require.NoError(t, adapter.Append(ctx, d1)) - // HasAt should return true for existing height + // HasAt should return true for pending height + assert.True(t, adapter.HasAt(ctx, 1)) + + // HasAt should return false for non-existent height + assert.False(t, adapter.HasAt(ctx, 2)) +} + +func TestDataStoreAdapter_HasAtFromStore(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + + // Save directly to store + h1, d1 := types.GetRandomBlock(1, 2, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &h1.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + adapter := NewDataStoreAdapter(store) + + // HasAt should return true for stored height assert.True(t, adapter.HasAt(ctx, 1)) // HasAt should return false for non-existent height @@ -163,7 +188,7 @@ func TestDataStoreAdapter_GetRange(t *testing.T) { store := New(ds) adapter := NewDataStoreAdapter(store) - // Create and append multiple data blocks + // Create and append multiple data blocks to pending _, d1 := types.GetRandomBlock(1, 1, "test-chain") _, d2 := types.GetRandomBlock(2, 1, "test-chain") _, d3 := types.GetRandomBlock(3, 1, "test-chain") @@ -215,11 +240,11 @@ func TestDataStoreAdapter_Init(t *testing.T) { _, d1 := types.GetRandomBlock(1, 1, "test-chain") - // Init should save the data + // Init should add data to pending err = adapter.Init(ctx, d1) require.NoError(t, err) - // Verify it's stored + // Verify it's retrievable from pending retrieved, err := adapter.GetByHeight(ctx, 1) require.NoError(t, err) assert.Equal(t, d1.Height(), retrieved.Height()) @@ -229,7 +254,7 @@ func TestDataStoreAdapter_Init(t *testing.T) { err = adapter.Init(ctx, d2) require.NoError(t, err) - // Height 2 should not be stored since Init was 
already done + // Height 2 should not be in pending since Init was already done assert.False(t, adapter.HasAt(ctx, 2)) } @@ -250,7 +275,31 @@ func TestDataStoreAdapter_Tail(t *testing.T) { _, d2 := types.GetRandomBlock(2, 1, "test-chain") require.NoError(t, adapter.Append(ctx, d1, d2)) - // Tail should return the first data + // Tail should return the first data from pending + tail, err := adapter.Tail(ctx) + require.NoError(t, err) + assert.Equal(t, uint64(1), tail.Height()) +} + +func TestDataStoreAdapter_TailFromStore(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + + // Save directly to store + h1, d1 := types.GetRandomBlock(1, 1, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &h1.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + adapter := NewDataStoreAdapter(store) + + // Tail should return the first data from store tail, err := adapter.Tail(ctx) require.NoError(t, err) assert.Equal(t, uint64(1), tail.Height()) @@ -290,12 +339,19 @@ func TestDataStoreAdapter_DeleteRange(t *testing.T) { assert.Equal(t, uint64(3), adapter.Height()) - // DeleteRange should update cached height + // DeleteRange should update cached height and remove from pending err = adapter.DeleteRange(ctx, 2, 4) require.NoError(t, err) // Cached height should be updated to 1 assert.Equal(t, uint64(1), adapter.Height()) + + // Heights 2 and 3 should no longer be available + assert.False(t, adapter.HasAt(ctx, 2)) + assert.False(t, adapter.HasAt(ctx, 3)) + + // Height 1 should still be available + assert.True(t, adapter.HasAt(ctx, 1)) } func TestDataStoreAdapter_OnDelete(t *testing.T) { @@ -341,8 +397,7 @@ func TestDataStoreAdapter_RefreshHeight(t *testing.T) { require.NoError(t, batch.SetHeight(1)) require.NoError(t, batch.Commit()) - // Adapter height may be stale - // RefreshHeight should update it + // RefreshHeight should update from store and clean pending err = adapter.RefreshHeight(ctx) require.NoError(t, err) assert.Equal(t, uint64(1), adapter.Height()) @@ -350,14 +405,24 @@ func TestDataStoreAdapter_RefreshHeight(t *testing.T) { func TestDataStoreAdapter_SetHeight(t *testing.T) { t.Parallel() + ctx := context.Background() ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) adapter := NewDataStoreAdapter(store) - adapter.SetHeight(42) - assert.Equal(t, uint64(42), adapter.Height()) + // Add some pending data + _, d1 := types.GetRandomBlock(1, 1, "test-chain") + _, d2 := types.GetRandomBlock(2, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, d1, d2)) + + // SetHeight should update height and clean pending at or below + adapter.SetHeight(1) + assert.Equal(t, uint64(1), adapter.Height()) + + // Height 2 should still be in pending + assert.True(t, adapter.HasAt(ctx, 2)) } func TestDataStoreAdapter_AppendSkipsExisting(t *testing.T) { @@ -367,12 +432,18 @@ func TestDataStoreAdapter_AppendSkipsExisting(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) - _, d1 := types.GetRandomBlock(1, 2, "test-chain") - require.NoError(t, adapter.Append(ctx, d1)) + // Save directly to store first + h1, d1 := types.GetRandomBlock(1, 2, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &h1.Signature)) + require.NoError(t, 
batch.SetHeight(1)) + require.NoError(t, batch.Commit()) - // Append the same data again should not error (skips existing) + adapter := NewDataStoreAdapter(store) + + // Append the same data again should not error (skips existing in store) err = adapter.Append(ctx, d1) require.NoError(t, err) @@ -550,3 +621,83 @@ func TestDataStoreAdapter_MultipleAppends(t *testing.T) { assert.True(t, adapter.HasAt(ctx, h)) } } + +func TestDataStoreAdapter_PendingAndStoreInteraction(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + // Add data to pending + _, d1 := types.GetRandomBlock(1, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, d1)) + + // Verify it's in pending + retrieved, err := adapter.GetByHeight(ctx, 1) + require.NoError(t, err) + assert.Equal(t, d1.Height(), retrieved.Height()) + + // Now save a different data at height 1 directly to store + h1Store, d1Store := types.GetRandomBlock(1, 2, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1Store, d1Store, &h1Store.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + // GetByHeight should now return from store (store takes precedence) + retrieved, err = adapter.GetByHeight(ctx, 1) + require.NoError(t, err) + // The store version should be returned + assert.Equal(t, d1Store.Height(), retrieved.Height()) +} + +func TestDataStoreAdapter_HeadPrefersPending(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + + // Save height 1 to store + h1, d1 := types.GetRandomBlock(1, 1, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &h1.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + adapter := NewDataStoreAdapter(store) + + // Add height 2 to pending + _, d2 := types.GetRandomBlock(2, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, d2)) + + // Head should return the pending data (higher height) + head, err := adapter.Head(ctx) + require.NoError(t, err) + assert.Equal(t, uint64(2), head.Height()) +} + +func TestDataStoreAdapter_GetFromPendingByHash(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewDataStoreAdapter(store) + + // Add data to pending + _, d1 := types.GetRandomBlock(1, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, d1)) + + // Get by hash from pending (uses data's Hash() method) + retrieved, err := adapter.Get(ctx, d1.Hash()) + require.NoError(t, err) + assert.Equal(t, d1.Height(), retrieved.Height()) +} diff --git a/pkg/store/header_store_adapter.go b/pkg/store/header_store_adapter.go index 15c81a6bc4..8b00065ccc 100644 --- a/pkg/store/header_store_adapter.go +++ b/pkg/store/header_store_adapter.go @@ -3,18 +3,29 @@ package store import ( "context" "errors" - "fmt" "sync" "sync/atomic" "github.com/celestiaorg/go-header" + lru "github.com/hashicorp/golang-lru/v2" "github.com/evstack/ev-node/types" ) +const ( + // defaultPendingCacheSize is the default size for the pending headers/data LRU cache. + // This should be large enough to handle P2P sync bursts but bounded to prevent memory issues. 
+ defaultPendingCacheSize = 1000 +) + // HeaderStoreAdapter wraps Store to implement header.Store[*types.SignedHeader]. // This allows the ev-node store to be used directly by go-header's P2P infrastructure, // eliminating the need for a separate go-header store and reducing data duplication. +// +// The adapter maintains an in-memory cache for headers received via P2P (through Append). +// This cache allows the go-header syncer and P2P handler to access headers before they +// are validated and persisted by the ev-node syncer. Once the ev-node syncer processes +// a block, it writes to the underlying store, and subsequent reads will come from the store. type HeaderStoreAdapter struct { store Store @@ -26,6 +37,10 @@ type HeaderStoreAdapter struct { mu sync.RWMutex initialized bool + // pendingHeaders is an LRU cache for headers received via Append that haven't been + // written to the store yet. Keyed by height. Using LRU prevents unbounded growth. + pendingHeaders *lru.Cache[uint64, *types.SignedHeader] + // onDeleteFn is called when headers are deleted (for rollback scenarios) onDeleteFn func(context.Context, uint64) error } @@ -35,8 +50,12 @@ var _ header.Store[*types.SignedHeader] = (*HeaderStoreAdapter)(nil) // NewHeaderStoreAdapter creates a new HeaderStoreAdapter wrapping the given store. func NewHeaderStoreAdapter(store Store) *HeaderStoreAdapter { + // Create LRU cache for pending headers - ignore error as size is constant and valid + pendingCache, _ := lru.New[uint64, *types.SignedHeader](defaultPendingCacheSize) + adapter := &HeaderStoreAdapter{ - store: store, + store: store, + pendingHeaders: pendingCache, } // Initialize height from store @@ -75,21 +94,50 @@ func (a *HeaderStoreAdapter) Stop(ctx context.Context) error { // Head returns the highest header in the store. func (a *HeaderStoreAdapter) Head(ctx context.Context, _ ...header.HeadOption[*types.SignedHeader]) (*types.SignedHeader, error) { - height := a.height.Load() - if height == 0 { - // Try to refresh from store - h, err := a.store.Height(ctx) - if err != nil { + // First check the store height + storeHeight, err := a.store.Height(ctx) + if err != nil && storeHeight == 0 { + // Check pending headers + if a.pendingHeaders.Len() == 0 { return nil, header.ErrNotFound } - if h == 0 { - return nil, header.ErrNotFound + + // Find the highest pending header + var maxHeight uint64 + var head *types.SignedHeader + for _, h := range a.pendingHeaders.Keys() { + if hdr, ok := a.pendingHeaders.Peek(h); ok && h > maxHeight { + maxHeight = h + head = hdr + } } - a.height.Store(h) - height = h + if head != nil { + return head, nil + } + return nil, header.ErrNotFound } - hdr, err := a.store.GetHeader(ctx, height) + // Check if we have a higher pending header + var maxPending uint64 + var pendingHead *types.SignedHeader + for _, h := range a.pendingHeaders.Keys() { + if hdr, ok := a.pendingHeaders.Peek(h); ok && h > maxPending { + maxPending = h + pendingHead = hdr + } + } + + if maxPending > storeHeight && pendingHead != nil { + a.height.Store(maxPending) + return pendingHead, nil + } + + if storeHeight == 0 { + return nil, header.ErrNotFound + } + + a.height.Store(storeHeight) + hdr, err := a.store.GetHeader(ctx, storeHeight) if err != nil { return nil, header.ErrNotFound } @@ -100,11 +148,14 @@ func (a *HeaderStoreAdapter) Head(ctx context.Context, _ ...header.HeadOption[*t // Tail returns the lowest header in the store. // For ev-node, this is typically the genesis/initial height. 
func (a *HeaderStoreAdapter) Tail(ctx context.Context) (*types.SignedHeader, error) { - // Start from height 1 and find the first available header - // This is a simple implementation; could be optimized with metadata height := a.height.Load() if height == 0 { - return nil, header.ErrNotFound + // Check store + h, err := a.store.Height(ctx) + if err != nil || h == 0 { + return nil, header.ErrNotFound + } + height = h } // Try height 1 first (most common case) @@ -113,12 +164,20 @@ func (a *HeaderStoreAdapter) Tail(ctx context.Context) (*types.SignedHeader, err return hdr, nil } + // Check pending for height 1 + if pendingHdr, ok := a.pendingHeaders.Peek(1); ok { + return pendingHdr, nil + } + // Linear scan from 1 to current height to find first header for h := uint64(2); h <= height; h++ { hdr, err = a.store.GetHeader(ctx, h) if err == nil { return hdr, nil } + if pendingHdr, ok := a.pendingHeaders.Peek(h); ok { + return pendingHdr, nil + } } return nil, header.ErrNotFound @@ -126,20 +185,36 @@ func (a *HeaderStoreAdapter) Tail(ctx context.Context) (*types.SignedHeader, err // Get returns a header by its hash. func (a *HeaderStoreAdapter) Get(ctx context.Context, hash header.Hash) (*types.SignedHeader, error) { + // First try the store hdr, _, err := a.store.GetBlockByHash(ctx, hash) - if err != nil { - return nil, header.ErrNotFound + if err == nil { + return hdr, nil } - return hdr, nil + + // Check pending headers + for _, h := range a.pendingHeaders.Keys() { + if pendingHdr, ok := a.pendingHeaders.Peek(h); ok && pendingHdr != nil && bytesEqual(pendingHdr.Hash(), hash) { + return pendingHdr, nil + } + } + + return nil, header.ErrNotFound } // GetByHeight returns a header at the given height. func (a *HeaderStoreAdapter) GetByHeight(ctx context.Context, height uint64) (*types.SignedHeader, error) { + // First try the store hdr, err := a.store.GetHeader(ctx, height) - if err != nil { - return nil, header.ErrNotFound + if err == nil { + return hdr, nil } - return hdr, nil + + // Check pending headers + if pendingHdr, ok := a.pendingHeaders.Peek(height); ok { + return pendingHdr, nil + } + + return nil, header.ErrNotFound } // GetRangeByHeight returns headers in the range [from.Height()+1, to). @@ -166,7 +241,7 @@ func (a *HeaderStoreAdapter) GetRange(ctx context.Context, from, to uint64) ([]* headers := make([]*types.SignedHeader, 0, to-from) for height := from; height < to; height++ { - hdr, err := a.store.GetHeader(ctx, height) + hdr, err := a.GetByHeight(ctx, height) if err != nil { // Return what we have so far if len(headers) > 0 { @@ -182,35 +257,71 @@ func (a *HeaderStoreAdapter) GetRange(ctx context.Context, from, to uint64) ([]* // Has checks if a header with the given hash exists. func (a *HeaderStoreAdapter) Has(ctx context.Context, hash header.Hash) (bool, error) { + // Check store first _, _, err := a.store.GetBlockByHash(ctx, hash) - if err != nil { - return false, nil + if err == nil { + return true, nil } - return true, nil + + // Check pending headers + for _, h := range a.pendingHeaders.Keys() { + if pendingHdr, ok := a.pendingHeaders.Peek(h); ok && pendingHdr != nil && bytesEqual(pendingHdr.Hash(), hash) { + return true, nil + } + } + + return false, nil } // HasAt checks if a header exists at the given height. 
func (a *HeaderStoreAdapter) HasAt(ctx context.Context, height uint64) bool { + // Check store first _, err := a.store.GetHeader(ctx, height) - return err == nil + if err == nil { + return true + } + + // Check pending headers + return a.pendingHeaders.Contains(height) } // Height returns the current height of the store. func (a *HeaderStoreAdapter) Height() uint64 { + // Check store first + if h, err := a.store.Height(context.Background()); err == nil && h > 0 { + // Also check pending for higher heights + maxPending := uint64(0) + for _, height := range a.pendingHeaders.Keys() { + if height > maxPending { + maxPending = height + } + } + + if maxPending > h { + a.height.Store(maxPending) + return maxPending + } + a.height.Store(h) + return h + } + + // Fall back to cached height or check pending height := a.height.Load() - if height == 0 { - // Try to refresh from store - if h, err := a.store.Height(context.Background()); err == nil { - a.height.Store(h) - return h + if height > 0 { + return height + } + + for _, h := range a.pendingHeaders.Keys() { + if h > height { + height = h } } return height } -// Append stores headers in the store. -// This method is called by go-header's P2P infrastructure when headers are received. -// We save the headers to the ev-node store to ensure they're available for the syncer. +// Append stores headers in the pending cache. +// These headers are received via P2P and will be available for retrieval +// until the ev-node syncer processes and persists them to the store. func (a *HeaderStoreAdapter) Append(ctx context.Context, headers ...*types.SignedHeader) error { if len(headers) == 0 { return nil @@ -221,46 +332,20 @@ func (a *HeaderStoreAdapter) Append(ctx context.Context, headers ...*types.Signe continue } - // Check if we already have this header - if a.HasAt(ctx, hdr.Height()) { - continue - } - - // Create a batch and save the header - // Note: We create empty data since we only have the header at this point. - // The full block data will be saved by the syncer when processing from DA. 
- batch, err := a.store.NewBatch(ctx) - if err != nil { - return fmt.Errorf("failed to create batch for header at height %d: %w", hdr.Height(), err) - } - - // Save header with empty data and signature - // The syncer will overwrite this with complete block data when it processes from DA - emptyData := &types.Data{ - Metadata: &types.Metadata{ - ChainID: hdr.ChainID(), - Height: hdr.Height(), - Time: uint64(hdr.Time().UnixNano()), - LastDataHash: hdr.LastHeader(), - }, - Txs: nil, - } + height := hdr.Height() - if err := batch.SaveBlockData(hdr, emptyData, &hdr.Signature); err != nil { - return fmt.Errorf("failed to save header at height %d: %w", hdr.Height(), err) + // Check if already in store + if _, err := a.store.GetHeader(ctx, height); err == nil { + // Already persisted, skip + continue } - if err := batch.SetHeight(hdr.Height()); err != nil { - return fmt.Errorf("failed to set height for header at height %d: %w", hdr.Height(), err) - } - - if err := batch.Commit(); err != nil { - return fmt.Errorf("failed to commit header at height %d: %w", hdr.Height(), err) - } + // Add to pending cache (LRU will evict oldest if full) + a.pendingHeaders.Add(height, hdr) // Update cached height - if hdr.Height() > a.height.Load() { - a.height.Store(hdr.Height()) + if height > a.height.Load() { + a.height.Store(height) } } @@ -281,33 +366,28 @@ func (a *HeaderStoreAdapter) Init(ctx context.Context, h *types.SignedHeader) er return nil } - // Use Append to save the header - a.mu.Unlock() // Unlock before calling Append to avoid deadlock - err := a.Append(ctx, h) - a.mu.Lock() // Re-lock for the initialized flag update - - if err != nil { - return err - } - + // Add to pending cache (LRU will evict oldest if full) + a.pendingHeaders.Add(h.Height(), h) + a.height.Store(h.Height()) a.initialized = true + return nil } // Sync ensures all pending writes are flushed. -// Delegates to the underlying store's sync if available. +// No-op for the adapter as pending data is in-memory cache. func (a *HeaderStoreAdapter) Sync(ctx context.Context) error { - // The underlying store handles its own syncing return nil } // DeleteRange deletes headers in the range [from, to). // This is used for rollback operations. func (a *HeaderStoreAdapter) DeleteRange(ctx context.Context, from, to uint64) error { - // Rollback is handled by the ev-node store's Rollback method - // This is called during store cleanup operations - if a.onDeleteFn != nil { - for height := from; height < to; height++ { + // Remove from pending cache + for height := from; height < to; height++ { + a.pendingHeaders.Remove(height) + + if a.onDeleteFn != nil { if err := a.onDeleteFn(ctx, height); err != nil { return err } @@ -335,6 +415,14 @@ func (a *HeaderStoreAdapter) RefreshHeight(ctx context.Context) error { return err } a.height.Store(h) + + // Clean up pending headers that are now in store + for _, height := range a.pendingHeaders.Keys() { + if height <= h { + a.pendingHeaders.Remove(height) + } + } + return nil } @@ -342,4 +430,24 @@ func (a *HeaderStoreAdapter) RefreshHeight(ctx context.Context) error { // This is useful when the syncer knows the new height after processing a block. func (a *HeaderStoreAdapter) SetHeight(height uint64) { a.height.Store(height) + + // Clean up pending headers at or below this height + for _, h := range a.pendingHeaders.Keys() { + if h <= height { + a.pendingHeaders.Remove(h) + } + } +} + +// bytesEqual compares two byte slices for equality. 
+func bytesEqual(a, b []byte) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true } diff --git a/pkg/store/header_store_adapter_test.go b/pkg/store/header_store_adapter_test.go index 2b6bff84bd..df604cae1c 100644 --- a/pkg/store/header_store_adapter_test.go +++ b/pkg/store/header_store_adapter_test.go @@ -53,14 +53,14 @@ func TestHeaderStoreAdapter_AppendAndRetrieve(t *testing.T) { h1, _ := types.GetRandomBlock(1, 2, "test-chain") h2, _ := types.GetRandomBlock(2, 2, "test-chain") - // Append headers + // Append headers - these go to pending cache err = adapter.Append(ctx, h1, h2) require.NoError(t, err) - // Check height is updated + // Check height is updated (from pending) assert.Equal(t, uint64(2), adapter.Height()) - // Retrieve by height + // Retrieve by height (from pending) retrieved, err := adapter.GetByHeight(ctx, 1) require.NoError(t, err) assert.Equal(t, h1.Height(), retrieved.Height()) @@ -69,23 +69,30 @@ func TestHeaderStoreAdapter_AppendAndRetrieve(t *testing.T) { require.NoError(t, err) assert.Equal(t, h2.Height(), retrieved.Height()) - // Head should return the latest + // Head should return the latest (from pending) head, err := adapter.Head(ctx) require.NoError(t, err) assert.Equal(t, uint64(2), head.Height()) } -func TestHeaderStoreAdapter_Get(t *testing.T) { +func TestHeaderStoreAdapter_GetFromStore(t *testing.T) { t.Parallel() ctx := context.Background() ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) - h1, _ := types.GetRandomBlock(1, 2, "test-chain") - require.NoError(t, adapter.Append(ctx, h1)) + // Save directly to store first + h1, d1 := types.GetRandomBlock(1, 2, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &h1.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + // Create adapter after data is in store + adapter := NewHeaderStoreAdapter(store) // Get by hash - need to use the index hash (sha256 of marshaled SignedHeader) hash := computeHeaderIndexHash(h1) @@ -105,10 +112,16 @@ func TestHeaderStoreAdapter_Has(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) - h1, _ := types.GetRandomBlock(1, 2, "test-chain") - require.NoError(t, adapter.Append(ctx, h1)) + // Save directly to store + h1, d1 := types.GetRandomBlock(1, 2, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &h1.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + adapter := NewHeaderStoreAdapter(store) // Has should return true for existing header - use index hash has, err := adapter.Has(ctx, computeHeaderIndexHash(h1)) @@ -133,7 +146,32 @@ func TestHeaderStoreAdapter_HasAt(t *testing.T) { h1, _ := types.GetRandomBlock(1, 2, "test-chain") require.NoError(t, adapter.Append(ctx, h1)) - // HasAt should return true for existing height + // HasAt should return true for pending height + assert.True(t, adapter.HasAt(ctx, 1)) + + // HasAt should return false for non-existent height + assert.False(t, adapter.HasAt(ctx, 2)) +} + +func TestHeaderStoreAdapter_HasAtFromStore(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + + // Save directly to store + h1, 
d1 := types.GetRandomBlock(1, 2, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &h1.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + adapter := NewHeaderStoreAdapter(store) + + // HasAt should return true for stored height assert.True(t, adapter.HasAt(ctx, 1)) // HasAt should return false for non-existent height @@ -149,7 +187,7 @@ func TestHeaderStoreAdapter_GetRange(t *testing.T) { store := New(ds) adapter := NewHeaderStoreAdapter(store) - // Create and append multiple headers + // Create and append multiple headers to pending h1, _ := types.GetRandomBlock(1, 1, "test-chain") h2, _ := types.GetRandomBlock(2, 1, "test-chain") h3, _ := types.GetRandomBlock(3, 1, "test-chain") @@ -201,11 +239,11 @@ func TestHeaderStoreAdapter_Init(t *testing.T) { h1, _ := types.GetRandomBlock(1, 1, "test-chain") - // Init should save the header + // Init should add header to pending err = adapter.Init(ctx, h1) require.NoError(t, err) - // Verify it's stored + // Verify it's retrievable from pending retrieved, err := adapter.GetByHeight(ctx, 1) require.NoError(t, err) assert.Equal(t, h1.Height(), retrieved.Height()) @@ -215,7 +253,7 @@ func TestHeaderStoreAdapter_Init(t *testing.T) { err = adapter.Init(ctx, h2) require.NoError(t, err) - // Height 2 should not be stored since Init was already done + // Height 2 should not be in pending since Init was already done assert.False(t, adapter.HasAt(ctx, 2)) } @@ -242,6 +280,30 @@ func TestHeaderStoreAdapter_Tail(t *testing.T) { assert.Equal(t, uint64(1), tail.Height()) } +func TestHeaderStoreAdapter_TailFromStore(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + + // Save directly to store + h1, d1 := types.GetRandomBlock(1, 1, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &h1.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + adapter := NewHeaderStoreAdapter(store) + + // Tail should return the first header from store + tail, err := adapter.Tail(ctx) + require.NoError(t, err) + assert.Equal(t, uint64(1), tail.Height()) +} + func TestHeaderStoreAdapter_StartStop(t *testing.T) { t.Parallel() ctx := context.Background() @@ -276,12 +338,19 @@ func TestHeaderStoreAdapter_DeleteRange(t *testing.T) { assert.Equal(t, uint64(3), adapter.Height()) - // DeleteRange should update cached height + // DeleteRange should update cached height and remove from pending err = adapter.DeleteRange(ctx, 2, 4) require.NoError(t, err) // Cached height should be updated to 1 assert.Equal(t, uint64(1), adapter.Height()) + + // Heights 2 and 3 should no longer be available + assert.False(t, adapter.HasAt(ctx, 2)) + assert.False(t, adapter.HasAt(ctx, 3)) + + // Height 1 should still be available + assert.True(t, adapter.HasAt(ctx, 1)) } func TestHeaderStoreAdapter_OnDelete(t *testing.T) { @@ -327,8 +396,7 @@ func TestHeaderStoreAdapter_RefreshHeight(t *testing.T) { require.NoError(t, batch.SetHeight(1)) require.NoError(t, batch.Commit()) - // Adapter height may be stale - // RefreshHeight should update it + // RefreshHeight should update from store and clean pending err = adapter.RefreshHeight(ctx) require.NoError(t, err) assert.Equal(t, uint64(1), adapter.Height()) @@ -336,14 +404,26 @@ func TestHeaderStoreAdapter_RefreshHeight(t *testing.T) { func 
TestHeaderStoreAdapter_SetHeight(t *testing.T) { t.Parallel() + ctx := context.Background() ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) adapter := NewHeaderStoreAdapter(store) - adapter.SetHeight(42) - assert.Equal(t, uint64(42), adapter.Height()) + // Add some pending headers + h1, _ := types.GetRandomBlock(1, 1, "test-chain") + h2, _ := types.GetRandomBlock(2, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, h1, h2)) + + // SetHeight should update height and clean pending at or below + adapter.SetHeight(1) + assert.Equal(t, uint64(1), adapter.Height()) + + // Height 1 should be cleaned from pending + // (but since we don't have store data, HasAt returns false now for pending) + // Height 2 should still be in pending + assert.True(t, adapter.HasAt(ctx, 2)) } func TestHeaderStoreAdapter_AppendSkipsExisting(t *testing.T) { @@ -353,12 +433,18 @@ func TestHeaderStoreAdapter_AppendSkipsExisting(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) - h1, _ := types.GetRandomBlock(1, 2, "test-chain") - require.NoError(t, adapter.Append(ctx, h1)) + // Save directly to store first + h1, d1 := types.GetRandomBlock(1, 2, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &h1.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + adapter := NewHeaderStoreAdapter(store) - // Append the same header again should not error (skips existing) + // Append the same header again should not error (skips existing in store) err = adapter.Append(ctx, h1) require.NoError(t, err) @@ -472,3 +558,83 @@ func TestHeaderStoreAdapter_ContextTimeout(t *testing.T) { // the adapter passes the context through _ = adapter.Append(ctx, h1) } + +func TestHeaderStoreAdapter_PendingAndStoreInteraction(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + // Add header to pending + h1, _ := types.GetRandomBlock(1, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, h1)) + + // Verify it's in pending + retrieved, err := adapter.GetByHeight(ctx, 1) + require.NoError(t, err) + assert.Equal(t, h1.Height(), retrieved.Height()) + + // Now save a different header at height 1 directly to store + h1Store, d1Store := types.GetRandomBlock(1, 2, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1Store, d1Store, &h1Store.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + // GetByHeight should now return from store (store takes precedence) + retrieved, err = adapter.GetByHeight(ctx, 1) + require.NoError(t, err) + // The store version should be returned + assert.Equal(t, h1Store.Height(), retrieved.Height()) +} + +func TestHeaderStoreAdapter_HeadPrefersPending(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + + // Save height 1 to store + h1, d1 := types.GetRandomBlock(1, 1, "test-chain") + batch, err := store.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(h1, d1, &h1.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + adapter := NewHeaderStoreAdapter(store) + + // Add height 2 to pending + h2, 
_ := types.GetRandomBlock(2, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, h2)) + + // Head should return the pending header (higher height) + head, err := adapter.Head(ctx) + require.NoError(t, err) + assert.Equal(t, uint64(2), head.Height()) +} + +func TestHeaderStoreAdapter_GetFromPendingByHash(t *testing.T) { + t.Parallel() + ctx := context.Background() + + ds, err := NewTestInMemoryKVStore() + require.NoError(t, err) + store := New(ds) + adapter := NewHeaderStoreAdapter(store) + + // Add header to pending + h1, _ := types.GetRandomBlock(1, 1, "test-chain") + require.NoError(t, adapter.Append(ctx, h1)) + + // Get by hash from pending (uses header's Hash() method) + retrieved, err := adapter.Get(ctx, h1.Hash()) + require.NoError(t, err) + assert.Equal(t, h1.Height(), retrieved.Height()) +} From bfc0701944aebd7e27b9fcf76857331e4b455b49 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 30 Jan 2026 14:36:14 +0100 Subject: [PATCH 05/21] clean-up --- pkg/store/data_store_adapter.go | 32 ------------------ pkg/store/data_store_adapter_test.go | 45 ------------------------ pkg/store/header_store_adapter.go | 39 ++------------------- pkg/store/header_store_adapter_test.go | 47 -------------------------- pkg/sync/sync_service.go | 2 -- 5 files changed, 2 insertions(+), 163 deletions(-) diff --git a/pkg/store/data_store_adapter.go b/pkg/store/data_store_adapter.go index 1dd63127a7..4e34de954f 100644 --- a/pkg/store/data_store_adapter.go +++ b/pkg/store/data_store_adapter.go @@ -400,35 +400,3 @@ func (a *DataStoreAdapter) DeleteRange(ctx context.Context, from, to uint64) err func (a *DataStoreAdapter) OnDelete(fn func(context.Context, uint64) error) { a.onDeleteFn = fn } - -// RefreshHeight updates the cached height from the underlying store. -// This should be called after the syncer processes a new block. -func (a *DataStoreAdapter) RefreshHeight(ctx context.Context) error { - h, err := a.store.Height(ctx) - if err != nil { - return err - } - a.height.Store(h) - - // Clean up pending data that is now in store - for _, height := range a.pendingData.Keys() { - if height <= h { - a.pendingData.Remove(height) - } - } - - return nil -} - -// SetHeight updates the cached height. -// This is useful when the syncer knows the new height after processing a block. 
-func (a *DataStoreAdapter) SetHeight(height uint64) { - a.height.Store(height) - - // Clean up pending data at or below this height - for _, h := range a.pendingData.Keys() { - if h <= height { - a.pendingData.Remove(h) - } - } -} diff --git a/pkg/store/data_store_adapter_test.go b/pkg/store/data_store_adapter_test.go index 578c18b344..2bc3ff5c3a 100644 --- a/pkg/store/data_store_adapter_test.go +++ b/pkg/store/data_store_adapter_test.go @@ -380,51 +380,6 @@ func TestDataStoreAdapter_OnDelete(t *testing.T) { assert.Equal(t, []uint64{1, 2}, deletedHeights) } -func TestDataStoreAdapter_RefreshHeight(t *testing.T) { - t.Parallel() - ctx := context.Background() - - ds, err := NewTestInMemoryKVStore() - require.NoError(t, err) - store := New(ds) - adapter := NewDataStoreAdapter(store) - - // Save a block directly to the underlying store - h1, d1 := types.GetRandomBlock(1, 1, "test-chain") - batch, err := store.NewBatch(ctx) - require.NoError(t, err) - require.NoError(t, batch.SaveBlockData(h1, d1, &types.Signature{})) - require.NoError(t, batch.SetHeight(1)) - require.NoError(t, batch.Commit()) - - // RefreshHeight should update from store and clean pending - err = adapter.RefreshHeight(ctx) - require.NoError(t, err) - assert.Equal(t, uint64(1), adapter.Height()) -} - -func TestDataStoreAdapter_SetHeight(t *testing.T) { - t.Parallel() - ctx := context.Background() - - ds, err := NewTestInMemoryKVStore() - require.NoError(t, err) - store := New(ds) - adapter := NewDataStoreAdapter(store) - - // Add some pending data - _, d1 := types.GetRandomBlock(1, 1, "test-chain") - _, d2 := types.GetRandomBlock(2, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, d1, d2)) - - // SetHeight should update height and clean pending at or below - adapter.SetHeight(1) - assert.Equal(t, uint64(1), adapter.Height()) - - // Height 2 should still be in pending - assert.True(t, adapter.HasAt(ctx, 2)) -} - func TestDataStoreAdapter_AppendSkipsExisting(t *testing.T) { t.Parallel() ctx := context.Background() diff --git a/pkg/store/header_store_adapter.go b/pkg/store/header_store_adapter.go index 8b00065ccc..991ae39b85 100644 --- a/pkg/store/header_store_adapter.go +++ b/pkg/store/header_store_adapter.go @@ -12,11 +12,8 @@ import ( "github.com/evstack/ev-node/types" ) -const ( - // defaultPendingCacheSize is the default size for the pending headers/data LRU cache. - // This should be large enough to handle P2P sync bursts but bounded to prevent memory issues. - defaultPendingCacheSize = 1000 -) +// defaultPendingCacheSize is the default size for the pending headers/data LRU cache. +const defaultPendingCacheSize = 1000 // HeaderStoreAdapter wraps Store to implement header.Store[*types.SignedHeader]. // This allows the ev-node store to be used directly by go-header's P2P infrastructure, @@ -407,38 +404,6 @@ func (a *HeaderStoreAdapter) OnDelete(fn func(context.Context, uint64) error) { a.onDeleteFn = fn } -// RefreshHeight updates the cached height from the underlying store. -// This should be called after the syncer processes a new block. -func (a *HeaderStoreAdapter) RefreshHeight(ctx context.Context) error { - h, err := a.store.Height(ctx) - if err != nil { - return err - } - a.height.Store(h) - - // Clean up pending headers that are now in store - for _, height := range a.pendingHeaders.Keys() { - if height <= h { - a.pendingHeaders.Remove(height) - } - } - - return nil -} - -// SetHeight updates the cached height. -// This is useful when the syncer knows the new height after processing a block. 
-func (a *HeaderStoreAdapter) SetHeight(height uint64) { - a.height.Store(height) - - // Clean up pending headers at or below this height - for _, h := range a.pendingHeaders.Keys() { - if h <= height { - a.pendingHeaders.Remove(h) - } - } -} - // bytesEqual compares two byte slices for equality. func bytesEqual(a, b []byte) bool { if len(a) != len(b) { diff --git a/pkg/store/header_store_adapter_test.go b/pkg/store/header_store_adapter_test.go index df604cae1c..3300def67b 100644 --- a/pkg/store/header_store_adapter_test.go +++ b/pkg/store/header_store_adapter_test.go @@ -379,53 +379,6 @@ func TestHeaderStoreAdapter_OnDelete(t *testing.T) { assert.Equal(t, []uint64{1, 2}, deletedHeights) } -func TestHeaderStoreAdapter_RefreshHeight(t *testing.T) { - t.Parallel() - ctx := context.Background() - - ds, err := NewTestInMemoryKVStore() - require.NoError(t, err) - store := New(ds) - adapter := NewHeaderStoreAdapter(store) - - // Save a block directly to the underlying store - h1, d1 := types.GetRandomBlock(1, 1, "test-chain") - batch, err := store.NewBatch(ctx) - require.NoError(t, err) - require.NoError(t, batch.SaveBlockData(h1, d1, &types.Signature{})) - require.NoError(t, batch.SetHeight(1)) - require.NoError(t, batch.Commit()) - - // RefreshHeight should update from store and clean pending - err = adapter.RefreshHeight(ctx) - require.NoError(t, err) - assert.Equal(t, uint64(1), adapter.Height()) -} - -func TestHeaderStoreAdapter_SetHeight(t *testing.T) { - t.Parallel() - ctx := context.Background() - - ds, err := NewTestInMemoryKVStore() - require.NoError(t, err) - store := New(ds) - adapter := NewHeaderStoreAdapter(store) - - // Add some pending headers - h1, _ := types.GetRandomBlock(1, 1, "test-chain") - h2, _ := types.GetRandomBlock(2, 1, "test-chain") - require.NoError(t, adapter.Append(ctx, h1, h2)) - - // SetHeight should update height and clean pending at or below - adapter.SetHeight(1) - assert.Equal(t, uint64(1), adapter.Height()) - - // Height 1 should be cleaned from pending - // (but since we don't have store data, HasAt returns false now for pending) - // Height 2 should still be in pending - assert.True(t, adapter.HasAt(ctx, 2)) -} - func TestHeaderStoreAdapter_AppendSkipsExisting(t *testing.T) { t.Parallel() ctx := context.Background() diff --git a/pkg/sync/sync_service.go b/pkg/sync/sync_service.go index be72e97179..7bdb160548 100644 --- a/pkg/sync/sync_service.go +++ b/pkg/sync/sync_service.go @@ -189,8 +189,6 @@ func (syncService *SyncService[H]) Store() header.Store[H] { } // WriteToStoreAndBroadcast broadcasts provided header or block to P2P network. -// Note: With the store adapter approach, actual storage is handled by the syncer -// writing to the ev-node store. This method primarily handles P2P broadcasting. 
func (syncService *SyncService[H]) WriteToStoreAndBroadcast(ctx context.Context, headerOrData H, opts ...pubsub.PubOpt) error { if syncService.genesis.InitialHeight == 0 { return fmt.Errorf("invalid initial height; cannot be zero") From ca8651b0534f0be7413c7894d3668cbfa141512a Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 30 Jan 2026 16:59:29 +0100 Subject: [PATCH 06/21] remove exchanger (partial revert #2855) --- .mockery.yaml | 12 - pkg/sync/exchange_wrapper.go | 103 ------- pkg/sync/exchange_wrapper_test.go | 181 ------------ pkg/sync/sync_service.go | 98 +------ test/mocks/external/hexchange.go | 330 ---------------------- test/mocks/external/p2pexchange.go | 432 ----------------------------- test/mocks/store.go | 34 --- 7 files changed, 11 insertions(+), 1179 deletions(-) delete mode 100644 pkg/sync/exchange_wrapper.go delete mode 100644 pkg/sync/exchange_wrapper_test.go delete mode 100644 test/mocks/external/hexchange.go delete mode 100644 test/mocks/external/p2pexchange.go diff --git a/.mockery.yaml b/.mockery.yaml index 22e2cd7621..b1119926e1 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -44,23 +44,11 @@ packages: filename: batch.go github.com/celestiaorg/go-header: interfaces: - Exchange: - config: - dir: ./test/mocks - pkgname: mocks - filename: external/hexchange.go Store: config: dir: ./test/mocks pkgname: mocks filename: external/hstore.go - github.com/evstack/ev-node/pkg/sync: - interfaces: - P2PExchange: - config: - dir: ./test/mocks - pkgname: mocks - filename: external/p2pexchange.go github.com/evstack/ev-node/block/internal/syncing: interfaces: DARetriever: diff --git a/pkg/sync/exchange_wrapper.go b/pkg/sync/exchange_wrapper.go deleted file mode 100644 index a5031c3526..0000000000 --- a/pkg/sync/exchange_wrapper.go +++ /dev/null @@ -1,103 +0,0 @@ -package sync - -import ( - "context" - - "github.com/celestiaorg/go-header" -) - -// GetterFunc retrieves a header by hash from a backing store. -type GetterFunc[H header.Header[H]] func(context.Context, header.Hash) (H, error) - -// GetterByHeightFunc retrieves a header by height from a backing store. -type GetterByHeightFunc[H header.Header[H]] func(context.Context, uint64) (H, error) - -// RangeGetterFunc retrieves headers in range [from, to) from a backing store. -// Returns the contiguous headers found starting from 'from', and the next height needed. -type RangeGetterFunc[H header.Header[H]] func(ctx context.Context, from, to uint64) ([]H, uint64, error) - -// P2PExchange defines the interface for the underlying P2P exchange. 
-type P2PExchange[H header.Header[H]] interface { - header.Exchange[H] - Start(context.Context) error - Stop(context.Context) error -} - -type exchangeWrapper[H header.Header[H]] struct { - p2pExchange P2PExchange[H] - getter GetterFunc[H] - getterByHeight GetterByHeightFunc[H] - rangeGetter RangeGetterFunc[H] -} - -func (ew *exchangeWrapper[H]) Get(ctx context.Context, hash header.Hash) (H, error) { - // Check DA store first - if ew.getter != nil { - if h, err := ew.getter(ctx, hash); err == nil && !h.IsZero() { - return h, nil - } - } - - // Fallback to network exchange - return ew.p2pExchange.Get(ctx, hash) -} - -func (ew *exchangeWrapper[H]) GetByHeight(ctx context.Context, height uint64) (H, error) { - // Check DA store first - if ew.getterByHeight != nil { - if h, err := ew.getterByHeight(ctx, height); err == nil && !h.IsZero() { - return h, nil - } - } - - // Fallback to network exchange - return ew.p2pExchange.GetByHeight(ctx, height) -} - -func (ew *exchangeWrapper[H]) Head(ctx context.Context, opts ...header.HeadOption[H]) (H, error) { - return ew.p2pExchange.Head(ctx, opts...) -} - -func (ew *exchangeWrapper[H]) GetRangeByHeight(ctx context.Context, from H, to uint64) ([]H, error) { - fromHeight := from.Height() + 1 - - // If no range getter, fallback entirely to P2P - if ew.rangeGetter == nil { - return ew.p2pExchange.GetRangeByHeight(ctx, from, to) - } - - // Try DA store first for contiguous range - daHeaders, nextHeight, err := ew.rangeGetter(ctx, fromHeight, to) - if err != nil { - // DA store failed, fallback to P2P for entire range - return ew.p2pExchange.GetRangeByHeight(ctx, from, to) - } - - // Got everything from DA - if nextHeight >= to { - return daHeaders, nil - } - - // Need remainder from P2P - if len(daHeaders) == 0 { - // Nothing from DA, get entire range from P2P - return ew.p2pExchange.GetRangeByHeight(ctx, from, to) - } - - // Get remainder from P2P starting after last DA header - lastDAHeader := daHeaders[len(daHeaders)-1] - p2pHeaders, err := ew.p2pExchange.GetRangeByHeight(ctx, lastDAHeader, to) - if err != nil { - return nil, err - } - - return append(daHeaders, p2pHeaders...), nil -} - -func (ew *exchangeWrapper[H]) Start(ctx context.Context) error { - return ew.p2pExchange.Start(ctx) -} - -func (ew *exchangeWrapper[H]) Stop(ctx context.Context) error { - return ew.p2pExchange.Stop(ctx) -} diff --git a/pkg/sync/exchange_wrapper_test.go b/pkg/sync/exchange_wrapper_test.go deleted file mode 100644 index ef57494286..0000000000 --- a/pkg/sync/exchange_wrapper_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package sync - -import ( - "context" - "errors" - "testing" - - "github.com/celestiaorg/go-header" - extmocks "github.com/evstack/ev-node/test/mocks/external" - "github.com/evstack/ev-node/types" - "github.com/stretchr/testify/assert" -) - -func TestExchangeWrapper_Get(t *testing.T) { - ctx := context.Background() - hash := header.Hash([]byte("test-hash")) - expectedHeader := &types.SignedHeader{} // Just a dummy - - t.Run("Hit in Store", func(t *testing.T) { - mockEx := extmocks.NewMockP2PExchange[*types.SignedHeader](t) - // Exchange should NOT be called - - getter := func(ctx context.Context, h header.Hash) (*types.SignedHeader, error) { - return expectedHeader, nil - } - - ew := &exchangeWrapper[*types.SignedHeader]{ - p2pExchange: mockEx, - getter: getter, - } - - h, err := ew.Get(ctx, hash) - assert.NoError(t, err) - assert.Equal(t, expectedHeader, h) - }) - - t.Run("Miss in Store", func(t *testing.T) { - mockEx := 
extmocks.NewMockP2PExchange[*types.SignedHeader](t) - mockEx.EXPECT().Get(ctx, hash).Return(expectedHeader, nil) - - getter := func(ctx context.Context, h header.Hash) (*types.SignedHeader, error) { - return nil, errors.New("not found") - } - - ew := &exchangeWrapper[*types.SignedHeader]{ - p2pExchange: mockEx, - getter: getter, - } - - h, err := ew.Get(ctx, hash) - assert.NoError(t, err) - assert.Equal(t, expectedHeader, h) - }) - - t.Run("Getter Not Configured", func(t *testing.T) { - mockEx := extmocks.NewMockP2PExchange[*types.SignedHeader](t) - mockEx.EXPECT().Get(ctx, hash).Return(expectedHeader, nil) - - ew := &exchangeWrapper[*types.SignedHeader]{ - p2pExchange: mockEx, - getter: nil, - } - - h, err := ew.Get(ctx, hash) - assert.NoError(t, err) - assert.Equal(t, expectedHeader, h) - }) -} - -func TestExchangeWrapper_GetByHeight(t *testing.T) { - ctx := context.Background() - height := uint64(10) - expectedHeader := &types.SignedHeader{} - - t.Run("Hit in Store", func(t *testing.T) { - mockEx := extmocks.NewMockP2PExchange[*types.SignedHeader](t) - - getterByHeight := func(ctx context.Context, h uint64) (*types.SignedHeader, error) { - return expectedHeader, nil - } - - ew := &exchangeWrapper[*types.SignedHeader]{ - p2pExchange: mockEx, - getterByHeight: getterByHeight, - } - - h, err := ew.GetByHeight(ctx, height) - assert.NoError(t, err) - assert.Equal(t, expectedHeader, h) - }) - - t.Run("Miss in Store", func(t *testing.T) { - mockEx := extmocks.NewMockP2PExchange[*types.SignedHeader](t) - mockEx.EXPECT().GetByHeight(ctx, height).Return(expectedHeader, nil) - - getterByHeight := func(ctx context.Context, h uint64) (*types.SignedHeader, error) { - return nil, errors.New("not found") - } - - ew := &exchangeWrapper[*types.SignedHeader]{ - p2pExchange: mockEx, - getterByHeight: getterByHeight, - } - - h, err := ew.GetByHeight(ctx, height) - assert.NoError(t, err) - assert.Equal(t, expectedHeader, h) - }) -} - -func TestExchangeWrapper_GetRangeByHeight(t *testing.T) { - ctx := context.Background() - - t.Run("All from DA", func(t *testing.T) { - mockEx := extmocks.NewMockP2PExchange[*types.SignedHeader](t) - - headers := []*types.SignedHeader{ - {Header: types.Header{BaseHeader: types.BaseHeader{Height: 2}}}, - {Header: types.Header{BaseHeader: types.BaseHeader{Height: 3}}}, - } - from := &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{Height: 1}}} - - rangeGetter := func(ctx context.Context, fromH, toH uint64) ([]*types.SignedHeader, uint64, error) { - return headers, 4, nil - } - - ew := &exchangeWrapper[*types.SignedHeader]{ - p2pExchange: mockEx, - rangeGetter: rangeGetter, - } - - result, err := ew.GetRangeByHeight(ctx, from, 4) - assert.NoError(t, err) - assert.Equal(t, headers, result) - }) - - t.Run("Partial from DA then P2P", func(t *testing.T) { - mockEx := extmocks.NewMockP2PExchange[*types.SignedHeader](t) - - daHeaders := []*types.SignedHeader{ - {Header: types.Header{BaseHeader: types.BaseHeader{Height: 2}}}, - } - p2pHeaders := []*types.SignedHeader{ - {Header: types.Header{BaseHeader: types.BaseHeader{Height: 3}}}, - } - from := &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{Height: 1}}} - - rangeGetter := func(ctx context.Context, fromH, toH uint64) ([]*types.SignedHeader, uint64, error) { - return daHeaders, 3, nil // only got height 2, need 3+ - } - mockEx.EXPECT().GetRangeByHeight(ctx, daHeaders[0], uint64(4)).Return(p2pHeaders, nil) - - ew := &exchangeWrapper[*types.SignedHeader]{ - p2pExchange: mockEx, - rangeGetter: 
rangeGetter, - } - - result, err := ew.GetRangeByHeight(ctx, from, 4) - assert.NoError(t, err) - assert.Equal(t, append(daHeaders, p2pHeaders...), result) - }) - - t.Run("No range getter fallback to P2P", func(t *testing.T) { - mockEx := extmocks.NewMockP2PExchange[*types.SignedHeader](t) - - from := &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{Height: 1}}} - expected := []*types.SignedHeader{ - {Header: types.Header{BaseHeader: types.BaseHeader{Height: 2}}}, - } - mockEx.EXPECT().GetRangeByHeight(ctx, from, uint64(3)).Return(expected, nil) - - ew := &exchangeWrapper[*types.SignedHeader]{ - p2pExchange: mockEx, - rangeGetter: nil, - } - - result, err := ew.GetRangeByHeight(ctx, from, 3) - assert.NoError(t, err) - assert.Equal(t, expected, result) - }) -} diff --git a/pkg/sync/sync_service.go b/pkg/sync/sync_service.go index 7bdb160548..21184a5136 100644 --- a/pkg/sync/sync_service.go +++ b/pkg/sync/sync_service.go @@ -47,7 +47,7 @@ type SyncService[H header.Header[H]] struct { p2p *p2p.Client - ex *exchangeWrapper[H] + ex *goheaderp2p.Exchange[H] sub *goheaderp2p.Subscriber[H] p2pServer *goheaderp2p.ExchangeServer[H] store header.Store[H] @@ -55,9 +55,6 @@ type SyncService[H header.Header[H]] struct { syncerStatus *SyncerStatus topicSubscription header.Subscription[H] - getter GetterFunc[H] - getterByHeight GetterByHeightFunc[H] - rangeGetter RangeGetterFunc[H] storeInitialized atomic.Bool } @@ -75,29 +72,8 @@ func NewDataSyncService( p2p *p2p.Client, logger zerolog.Logger, ) (*DataSyncService, error) { - var getter GetterFunc[*types.Data] - var getterByHeight GetterByHeightFunc[*types.Data] - var rangeGetter RangeGetterFunc[*types.Data] - - if evStore != nil { - getter = func(ctx context.Context, hash header.Hash) (*types.Data, error) { - _, d, err := evStore.GetBlockByHash(ctx, hash) - return d, err - } - getterByHeight = func(ctx context.Context, height uint64) (*types.Data, error) { - _, d, err := evStore.GetBlockData(ctx, height) - return d, err - } - rangeGetter = func(ctx context.Context, from, to uint64) ([]*types.Data, uint64, error) { - return getContiguousRange(ctx, from, to, func(ctx context.Context, h uint64) (*types.Data, error) { - _, d, err := evStore.GetBlockData(ctx, h) - return d, err - }) - } - } - storeAdapter := store.NewDataStoreAdapter(evStore) - return newSyncService[*types.Data](storeAdapter, getter, getterByHeight, rangeGetter, dataSync, conf, genesis, p2p, logger) + return newSyncService[*types.Data](storeAdapter, dataSync, conf, genesis, p2p, logger) } // NewHeaderSyncService returns a new HeaderSyncService. 
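
(Aside, not part of the patch.) With the getter closures removed, every read the sync service needs flows through the store adapter, which checks the persisted ev-node store before falling back to its pending P2P cache. The Go sketch below only illustrates that consumption path using APIs visible in this series (store.Store, store.NewHeaderStoreAdapter, header.Store); the function name readSyncedHeader and its parameters are placeholders, not code from the patch.

package example

import (
	"context"

	"github.com/celestiaorg/go-header"

	"github.com/evstack/ev-node/pkg/store"
	"github.com/evstack/ev-node/types"
)

// readSyncedHeader is an illustrative helper (not part of the patch).
// It shows a read going through the adapter: the persisted ev-node store
// is consulted first, then the in-memory cache of headers received via P2P.
func readSyncedHeader(ctx context.Context, evStore store.Store, height uint64) (*types.SignedHeader, error) {
	// The adapter satisfies header.Store[*types.SignedHeader], so go-header's
	// syncer and exchange server can read from the ev-node store directly.
	var hs header.Store[*types.SignedHeader] = store.NewHeaderStoreAdapter(evStore)
	return hs.GetByHeight(ctx, height)
}
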
@@ -108,32 +84,13 @@ func NewHeaderSyncService( p2p *p2p.Client, logger zerolog.Logger, ) (*HeaderSyncService, error) { - var getter GetterFunc[*types.SignedHeader] - var getterByHeight GetterByHeightFunc[*types.SignedHeader] - var rangeGetter RangeGetterFunc[*types.SignedHeader] - - if evStore != nil { - getter = func(ctx context.Context, hash header.Hash) (*types.SignedHeader, error) { - h, _, err := evStore.GetBlockByHash(ctx, hash) - return h, err - } - getterByHeight = func(ctx context.Context, height uint64) (*types.SignedHeader, error) { - return evStore.GetHeader(ctx, height) - } - rangeGetter = func(ctx context.Context, from, to uint64) ([]*types.SignedHeader, uint64, error) { - return getContiguousRange(ctx, from, to, evStore.GetHeader) - } - } storeAdapter := store.NewHeaderStoreAdapter(evStore) - return newSyncService[*types.SignedHeader](storeAdapter, getter, getterByHeight, rangeGetter, headerSync, conf, genesis, p2p, logger) + return newSyncService[*types.SignedHeader](storeAdapter, headerSync, conf, genesis, p2p, logger) } func newSyncService[H header.Header[H]]( storeAdapter header.Store[H], - getter GetterFunc[H], - getterByHeight GetterByHeightFunc[H], - rangeGetter RangeGetterFunc[H], syncType syncType, conf config.Config, genesis genesis.Genesis, @@ -145,44 +102,18 @@ func newSyncService[H header.Header[H]]( } svc := &SyncService[H]{ - conf: conf, - genesis: genesis, - p2p: p2p, - store: storeAdapter, - getter: getter, - getterByHeight: getterByHeight, - rangeGetter: rangeGetter, - syncType: syncType, - logger: logger, - syncerStatus: new(SyncerStatus), + conf: conf, + genesis: genesis, + p2p: p2p, + store: storeAdapter, + syncType: syncType, + logger: logger, + syncerStatus: new(SyncerStatus), } return svc, nil } -// getContiguousRange fetches headers/data for the given range [from, to). -// Returns the contiguous items found and the next height needed. 
-func getContiguousRange[H header.Header[H]]( - ctx context.Context, - from, to uint64, - getByHeight func(context.Context, uint64) (H, error), -) ([]H, uint64, error) { - if from >= to { - return nil, from, nil - } - - result := make([]H, 0, to-from) - for height := from; height < to; height++ { - h, err := getByHeight(ctx, height) - if err != nil || h.IsZero() { - // Gap found, return what we have so far - return result, height, nil - } - result = append(result, h) - } - return result, to, nil -} - // Store returns the store of the SyncService func (syncService *SyncService[H]) Store() header.Store[H] { return syncService.store @@ -351,18 +282,11 @@ func (syncService *SyncService[H]) setupP2PInfrastructure(ctx context.Context) ( peerIDs := syncService.getPeerIDs() - p2pExchange, err := newP2PExchange[H](syncService.p2p.Host(), peerIDs, networkID, syncService.genesis.ChainID, syncService.p2p.ConnectionGater()) + syncService.ex, err = newP2PExchange[H](syncService.p2p.Host(), peerIDs, networkID, syncService.genesis.ChainID, syncService.p2p.ConnectionGater()) if err != nil { return nil, fmt.Errorf("error while creating exchange: %w", err) } - // Create exchange wrapper with DA store getters - syncService.ex = &exchangeWrapper[H]{ - p2pExchange: p2pExchange, - getter: syncService.getter, - getterByHeight: syncService.getterByHeight, - rangeGetter: syncService.rangeGetter, - } if err := syncService.ex.Start(ctx); err != nil { return nil, fmt.Errorf("error while starting exchange: %w", err) } diff --git a/test/mocks/external/hexchange.go b/test/mocks/external/hexchange.go deleted file mode 100644 index cfa5729e5d..0000000000 --- a/test/mocks/external/hexchange.go +++ /dev/null @@ -1,330 +0,0 @@ -// Code generated by mockery; DO NOT EDIT. -// github.com/vektra/mockery -// template: testify - -package mocks - -import ( - "context" - - "github.com/celestiaorg/go-header" - mock "github.com/stretchr/testify/mock" -) - -// NewMockExchange creates a new instance of MockExchange. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewMockExchange[H header.Header[H]](t interface { - mock.TestingT - Cleanup(func()) -}) *MockExchange[H] { - mock := &MockExchange[H]{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} - -// MockExchange is an autogenerated mock type for the Exchange type -type MockExchange[H header.Header[H]] struct { - mock.Mock -} - -type MockExchange_Expecter[H header.Header[H]] struct { - mock *mock.Mock -} - -func (_m *MockExchange[H]) EXPECT() *MockExchange_Expecter[H] { - return &MockExchange_Expecter[H]{mock: &_m.Mock} -} - -// Get provides a mock function for the type MockExchange -func (_mock *MockExchange[H]) Get(context1 context.Context, hash header.Hash) (H, error) { - ret := _mock.Called(context1, hash) - - if len(ret) == 0 { - panic("no return value specified for Get") - } - - var r0 H - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, header.Hash) (H, error)); ok { - return returnFunc(context1, hash) - } - if returnFunc, ok := ret.Get(0).(func(context.Context, header.Hash) H); ok { - r0 = returnFunc(context1, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(H) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, header.Hash) error); ok { - r1 = returnFunc(context1, hash) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockExchange_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' -type MockExchange_Get_Call[H header.Header[H]] struct { - *mock.Call -} - -// Get is a helper method to define mock.On call -// - context1 context.Context -// - hash header.Hash -func (_e *MockExchange_Expecter[H]) Get(context1 interface{}, hash interface{}) *MockExchange_Get_Call[H] { - return &MockExchange_Get_Call[H]{Call: _e.mock.On("Get", context1, hash)} -} - -func (_c *MockExchange_Get_Call[H]) Run(run func(context1 context.Context, hash header.Hash)) *MockExchange_Get_Call[H] { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - var arg1 header.Hash - if args[1] != nil { - arg1 = args[1].(header.Hash) - } - run( - arg0, - arg1, - ) - }) - return _c -} - -func (_c *MockExchange_Get_Call[H]) Return(v H, err error) *MockExchange_Get_Call[H] { - _c.Call.Return(v, err) - return _c -} - -func (_c *MockExchange_Get_Call[H]) RunAndReturn(run func(context1 context.Context, hash header.Hash) (H, error)) *MockExchange_Get_Call[H] { - _c.Call.Return(run) - return _c -} - -// GetByHeight provides a mock function for the type MockExchange -func (_mock *MockExchange[H]) GetByHeight(context1 context.Context, v uint64) (H, error) { - ret := _mock.Called(context1, v) - - if len(ret) == 0 { - panic("no return value specified for GetByHeight") - } - - var r0 H - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) (H, error)); ok { - return returnFunc(context1, v) - } - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) H); ok { - r0 = returnFunc(context1, v) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(H) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { - r1 = returnFunc(context1, v) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockExchange_GetByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetByHeight' -type MockExchange_GetByHeight_Call[H header.Header[H]] struct { - *mock.Call -} - -// GetByHeight is a helper method to define mock.On call 
-// - context1 context.Context -// - v uint64 -func (_e *MockExchange_Expecter[H]) GetByHeight(context1 interface{}, v interface{}) *MockExchange_GetByHeight_Call[H] { - return &MockExchange_GetByHeight_Call[H]{Call: _e.mock.On("GetByHeight", context1, v)} -} - -func (_c *MockExchange_GetByHeight_Call[H]) Run(run func(context1 context.Context, v uint64)) *MockExchange_GetByHeight_Call[H] { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - var arg1 uint64 - if args[1] != nil { - arg1 = args[1].(uint64) - } - run( - arg0, - arg1, - ) - }) - return _c -} - -func (_c *MockExchange_GetByHeight_Call[H]) Return(v1 H, err error) *MockExchange_GetByHeight_Call[H] { - _c.Call.Return(v1, err) - return _c -} - -func (_c *MockExchange_GetByHeight_Call[H]) RunAndReturn(run func(context1 context.Context, v uint64) (H, error)) *MockExchange_GetByHeight_Call[H] { - _c.Call.Return(run) - return _c -} - -// GetRangeByHeight provides a mock function for the type MockExchange -func (_mock *MockExchange[H]) GetRangeByHeight(ctx context.Context, from H, to uint64) ([]H, error) { - ret := _mock.Called(ctx, from, to) - - if len(ret) == 0 { - panic("no return value specified for GetRangeByHeight") - } - - var r0 []H - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, H, uint64) ([]H, error)); ok { - return returnFunc(ctx, from, to) - } - if returnFunc, ok := ret.Get(0).(func(context.Context, H, uint64) []H); ok { - r0 = returnFunc(ctx, from, to) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]H) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, H, uint64) error); ok { - r1 = returnFunc(ctx, from, to) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockExchange_GetRangeByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRangeByHeight' -type MockExchange_GetRangeByHeight_Call[H header.Header[H]] struct { - *mock.Call -} - -// GetRangeByHeight is a helper method to define mock.On call -// - ctx context.Context -// - from H -// - to uint64 -func (_e *MockExchange_Expecter[H]) GetRangeByHeight(ctx interface{}, from interface{}, to interface{}) *MockExchange_GetRangeByHeight_Call[H] { - return &MockExchange_GetRangeByHeight_Call[H]{Call: _e.mock.On("GetRangeByHeight", ctx, from, to)} -} - -func (_c *MockExchange_GetRangeByHeight_Call[H]) Run(run func(ctx context.Context, from H, to uint64)) *MockExchange_GetRangeByHeight_Call[H] { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - var arg1 H - if args[1] != nil { - arg1 = args[1].(H) - } - var arg2 uint64 - if args[2] != nil { - arg2 = args[2].(uint64) - } - run( - arg0, - arg1, - arg2, - ) - }) - return _c -} - -func (_c *MockExchange_GetRangeByHeight_Call[H]) Return(vs []H, err error) *MockExchange_GetRangeByHeight_Call[H] { - _c.Call.Return(vs, err) - return _c -} - -func (_c *MockExchange_GetRangeByHeight_Call[H]) RunAndReturn(run func(ctx context.Context, from H, to uint64) ([]H, error)) *MockExchange_GetRangeByHeight_Call[H] { - _c.Call.Return(run) - return _c -} - -// Head provides a mock function for the type MockExchange -func (_mock *MockExchange[H]) Head(context1 context.Context, headOptions ...header.HeadOption[H]) (H, error) { - // header.HeadOption[H] - _va := make([]interface{}, len(headOptions)) - for _i := range headOptions { - _va[_i] = headOptions[_i] - } - var _ca []interface{} - _ca = 
append(_ca, context1) - _ca = append(_ca, _va...) - ret := _mock.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Head") - } - - var r0 H - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, ...header.HeadOption[H]) (H, error)); ok { - return returnFunc(context1, headOptions...) - } - if returnFunc, ok := ret.Get(0).(func(context.Context, ...header.HeadOption[H]) H); ok { - r0 = returnFunc(context1, headOptions...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(H) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, ...header.HeadOption[H]) error); ok { - r1 = returnFunc(context1, headOptions...) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockExchange_Head_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Head' -type MockExchange_Head_Call[H header.Header[H]] struct { - *mock.Call -} - -// Head is a helper method to define mock.On call -// - context1 context.Context -// - headOptions ...header.HeadOption[H] -func (_e *MockExchange_Expecter[H]) Head(context1 interface{}, headOptions ...interface{}) *MockExchange_Head_Call[H] { - return &MockExchange_Head_Call[H]{Call: _e.mock.On("Head", - append([]interface{}{context1}, headOptions...)...)} -} - -func (_c *MockExchange_Head_Call[H]) Run(run func(context1 context.Context, headOptions ...header.HeadOption[H])) *MockExchange_Head_Call[H] { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - var arg1 []header.HeadOption[H] - variadicArgs := make([]header.HeadOption[H], len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(header.HeadOption[H]) - } - } - arg1 = variadicArgs - run( - arg0, - arg1..., - ) - }) - return _c -} - -func (_c *MockExchange_Head_Call[H]) Return(v H, err error) *MockExchange_Head_Call[H] { - _c.Call.Return(v, err) - return _c -} - -func (_c *MockExchange_Head_Call[H]) RunAndReturn(run func(context1 context.Context, headOptions ...header.HeadOption[H]) (H, error)) *MockExchange_Head_Call[H] { - _c.Call.Return(run) - return _c -} diff --git a/test/mocks/external/p2pexchange.go b/test/mocks/external/p2pexchange.go deleted file mode 100644 index 2ed96418cb..0000000000 --- a/test/mocks/external/p2pexchange.go +++ /dev/null @@ -1,432 +0,0 @@ -// Code generated by mockery; DO NOT EDIT. -// github.com/vektra/mockery -// template: testify - -package mocks - -import ( - "context" - - "github.com/celestiaorg/go-header" - mock "github.com/stretchr/testify/mock" -) - -// NewMockP2PExchange creates a new instance of MockP2PExchange. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewMockP2PExchange[H header.Header[H]](t interface { - mock.TestingT - Cleanup(func()) -}) *MockP2PExchange[H] { - mock := &MockP2PExchange[H]{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} - -// MockP2PExchange is an autogenerated mock type for the P2PExchange type -type MockP2PExchange[H header.Header[H]] struct { - mock.Mock -} - -type MockP2PExchange_Expecter[H header.Header[H]] struct { - mock *mock.Mock -} - -func (_m *MockP2PExchange[H]) EXPECT() *MockP2PExchange_Expecter[H] { - return &MockP2PExchange_Expecter[H]{mock: &_m.Mock} -} - -// Get provides a mock function for the type MockP2PExchange -func (_mock *MockP2PExchange[H]) Get(context1 context.Context, hash header.Hash) (H, error) { - ret := _mock.Called(context1, hash) - - if len(ret) == 0 { - panic("no return value specified for Get") - } - - var r0 H - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, header.Hash) (H, error)); ok { - return returnFunc(context1, hash) - } - if returnFunc, ok := ret.Get(0).(func(context.Context, header.Hash) H); ok { - r0 = returnFunc(context1, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(H) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, header.Hash) error); ok { - r1 = returnFunc(context1, hash) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockP2PExchange_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' -type MockP2PExchange_Get_Call[H header.Header[H]] struct { - *mock.Call -} - -// Get is a helper method to define mock.On call -// - context1 context.Context -// - hash header.Hash -func (_e *MockP2PExchange_Expecter[H]) Get(context1 interface{}, hash interface{}) *MockP2PExchange_Get_Call[H] { - return &MockP2PExchange_Get_Call[H]{Call: _e.mock.On("Get", context1, hash)} -} - -func (_c *MockP2PExchange_Get_Call[H]) Run(run func(context1 context.Context, hash header.Hash)) *MockP2PExchange_Get_Call[H] { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - var arg1 header.Hash - if args[1] != nil { - arg1 = args[1].(header.Hash) - } - run( - arg0, - arg1, - ) - }) - return _c -} - -func (_c *MockP2PExchange_Get_Call[H]) Return(v H, err error) *MockP2PExchange_Get_Call[H] { - _c.Call.Return(v, err) - return _c -} - -func (_c *MockP2PExchange_Get_Call[H]) RunAndReturn(run func(context1 context.Context, hash header.Hash) (H, error)) *MockP2PExchange_Get_Call[H] { - _c.Call.Return(run) - return _c -} - -// GetByHeight provides a mock function for the type MockP2PExchange -func (_mock *MockP2PExchange[H]) GetByHeight(context1 context.Context, v uint64) (H, error) { - ret := _mock.Called(context1, v) - - if len(ret) == 0 { - panic("no return value specified for GetByHeight") - } - - var r0 H - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) (H, error)); ok { - return returnFunc(context1, v) - } - if returnFunc, ok := ret.Get(0).(func(context.Context, uint64) H); ok { - r0 = returnFunc(context1, v) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(H) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, uint64) error); ok { - r1 = returnFunc(context1, v) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockP2PExchange_GetByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetByHeight' -type MockP2PExchange_GetByHeight_Call[H header.Header[H]] 
struct { - *mock.Call -} - -// GetByHeight is a helper method to define mock.On call -// - context1 context.Context -// - v uint64 -func (_e *MockP2PExchange_Expecter[H]) GetByHeight(context1 interface{}, v interface{}) *MockP2PExchange_GetByHeight_Call[H] { - return &MockP2PExchange_GetByHeight_Call[H]{Call: _e.mock.On("GetByHeight", context1, v)} -} - -func (_c *MockP2PExchange_GetByHeight_Call[H]) Run(run func(context1 context.Context, v uint64)) *MockP2PExchange_GetByHeight_Call[H] { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - var arg1 uint64 - if args[1] != nil { - arg1 = args[1].(uint64) - } - run( - arg0, - arg1, - ) - }) - return _c -} - -func (_c *MockP2PExchange_GetByHeight_Call[H]) Return(v1 H, err error) *MockP2PExchange_GetByHeight_Call[H] { - _c.Call.Return(v1, err) - return _c -} - -func (_c *MockP2PExchange_GetByHeight_Call[H]) RunAndReturn(run func(context1 context.Context, v uint64) (H, error)) *MockP2PExchange_GetByHeight_Call[H] { - _c.Call.Return(run) - return _c -} - -// GetRangeByHeight provides a mock function for the type MockP2PExchange -func (_mock *MockP2PExchange[H]) GetRangeByHeight(ctx context.Context, from H, to uint64) ([]H, error) { - ret := _mock.Called(ctx, from, to) - - if len(ret) == 0 { - panic("no return value specified for GetRangeByHeight") - } - - var r0 []H - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, H, uint64) ([]H, error)); ok { - return returnFunc(ctx, from, to) - } - if returnFunc, ok := ret.Get(0).(func(context.Context, H, uint64) []H); ok { - r0 = returnFunc(ctx, from, to) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]H) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, H, uint64) error); ok { - r1 = returnFunc(ctx, from, to) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockP2PExchange_GetRangeByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRangeByHeight' -type MockP2PExchange_GetRangeByHeight_Call[H header.Header[H]] struct { - *mock.Call -} - -// GetRangeByHeight is a helper method to define mock.On call -// - ctx context.Context -// - from H -// - to uint64 -func (_e *MockP2PExchange_Expecter[H]) GetRangeByHeight(ctx interface{}, from interface{}, to interface{}) *MockP2PExchange_GetRangeByHeight_Call[H] { - return &MockP2PExchange_GetRangeByHeight_Call[H]{Call: _e.mock.On("GetRangeByHeight", ctx, from, to)} -} - -func (_c *MockP2PExchange_GetRangeByHeight_Call[H]) Run(run func(ctx context.Context, from H, to uint64)) *MockP2PExchange_GetRangeByHeight_Call[H] { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - var arg1 H - if args[1] != nil { - arg1 = args[1].(H) - } - var arg2 uint64 - if args[2] != nil { - arg2 = args[2].(uint64) - } - run( - arg0, - arg1, - arg2, - ) - }) - return _c -} - -func (_c *MockP2PExchange_GetRangeByHeight_Call[H]) Return(vs []H, err error) *MockP2PExchange_GetRangeByHeight_Call[H] { - _c.Call.Return(vs, err) - return _c -} - -func (_c *MockP2PExchange_GetRangeByHeight_Call[H]) RunAndReturn(run func(ctx context.Context, from H, to uint64) ([]H, error)) *MockP2PExchange_GetRangeByHeight_Call[H] { - _c.Call.Return(run) - return _c -} - -// Head provides a mock function for the type MockP2PExchange -func (_mock *MockP2PExchange[H]) Head(context1 context.Context, headOptions ...header.HeadOption[H]) (H, error) { - // 
header.HeadOption[H] - _va := make([]interface{}, len(headOptions)) - for _i := range headOptions { - _va[_i] = headOptions[_i] - } - var _ca []interface{} - _ca = append(_ca, context1) - _ca = append(_ca, _va...) - ret := _mock.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Head") - } - - var r0 H - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, ...header.HeadOption[H]) (H, error)); ok { - return returnFunc(context1, headOptions...) - } - if returnFunc, ok := ret.Get(0).(func(context.Context, ...header.HeadOption[H]) H); ok { - r0 = returnFunc(context1, headOptions...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(H) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, ...header.HeadOption[H]) error); ok { - r1 = returnFunc(context1, headOptions...) - } else { - r1 = ret.Error(1) - } - return r0, r1 -} - -// MockP2PExchange_Head_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Head' -type MockP2PExchange_Head_Call[H header.Header[H]] struct { - *mock.Call -} - -// Head is a helper method to define mock.On call -// - context1 context.Context -// - headOptions ...header.HeadOption[H] -func (_e *MockP2PExchange_Expecter[H]) Head(context1 interface{}, headOptions ...interface{}) *MockP2PExchange_Head_Call[H] { - return &MockP2PExchange_Head_Call[H]{Call: _e.mock.On("Head", - append([]interface{}{context1}, headOptions...)...)} -} - -func (_c *MockP2PExchange_Head_Call[H]) Run(run func(context1 context.Context, headOptions ...header.HeadOption[H])) *MockP2PExchange_Head_Call[H] { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - var arg1 []header.HeadOption[H] - variadicArgs := make([]header.HeadOption[H], len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(header.HeadOption[H]) - } - } - arg1 = variadicArgs - run( - arg0, - arg1..., - ) - }) - return _c -} - -func (_c *MockP2PExchange_Head_Call[H]) Return(v H, err error) *MockP2PExchange_Head_Call[H] { - _c.Call.Return(v, err) - return _c -} - -func (_c *MockP2PExchange_Head_Call[H]) RunAndReturn(run func(context1 context.Context, headOptions ...header.HeadOption[H]) (H, error)) *MockP2PExchange_Head_Call[H] { - _c.Call.Return(run) - return _c -} - -// Start provides a mock function for the type MockP2PExchange -func (_mock *MockP2PExchange[H]) Start(context1 context.Context) error { - ret := _mock.Called(context1) - - if len(ret) == 0 { - panic("no return value specified for Start") - } - - var r0 error - if returnFunc, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = returnFunc(context1) - } else { - r0 = ret.Error(0) - } - return r0 -} - -// MockP2PExchange_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' -type MockP2PExchange_Start_Call[H header.Header[H]] struct { - *mock.Call -} - -// Start is a helper method to define mock.On call -// - context1 context.Context -func (_e *MockP2PExchange_Expecter[H]) Start(context1 interface{}) *MockP2PExchange_Start_Call[H] { - return &MockP2PExchange_Start_Call[H]{Call: _e.mock.On("Start", context1)} -} - -func (_c *MockP2PExchange_Start_Call[H]) Run(run func(context1 context.Context)) *MockP2PExchange_Start_Call[H] { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) - }) - return _c -} - -func (_c 
*MockP2PExchange_Start_Call[H]) Return(err error) *MockP2PExchange_Start_Call[H] { - _c.Call.Return(err) - return _c -} - -func (_c *MockP2PExchange_Start_Call[H]) RunAndReturn(run func(context1 context.Context) error) *MockP2PExchange_Start_Call[H] { - _c.Call.Return(run) - return _c -} - -// Stop provides a mock function for the type MockP2PExchange -func (_mock *MockP2PExchange[H]) Stop(context1 context.Context) error { - ret := _mock.Called(context1) - - if len(ret) == 0 { - panic("no return value specified for Stop") - } - - var r0 error - if returnFunc, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = returnFunc(context1) - } else { - r0 = ret.Error(0) - } - return r0 -} - -// MockP2PExchange_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop' -type MockP2PExchange_Stop_Call[H header.Header[H]] struct { - *mock.Call -} - -// Stop is a helper method to define mock.On call -// - context1 context.Context -func (_e *MockP2PExchange_Expecter[H]) Stop(context1 interface{}) *MockP2PExchange_Stop_Call[H] { - return &MockP2PExchange_Stop_Call[H]{Call: _e.mock.On("Stop", context1)} -} - -func (_c *MockP2PExchange_Stop_Call[H]) Run(run func(context1 context.Context)) *MockP2PExchange_Stop_Call[H] { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) - }) - return _c -} - -func (_c *MockP2PExchange_Stop_Call[H]) Return(err error) *MockP2PExchange_Stop_Call[H] { - _c.Call.Return(err) - return _c -} - -func (_c *MockP2PExchange_Stop_Call[H]) RunAndReturn(run func(context1 context.Context) error) *MockP2PExchange_Stop_Call[H] { - _c.Call.Return(run) - return _c -} diff --git a/test/mocks/store.go b/test/mocks/store.go index 84424ea016..7f2aa180d0 100644 --- a/test/mocks/store.go +++ b/test/mocks/store.go @@ -880,37 +880,3 @@ func (_c *MockStore_SetMetadata_Call) RunAndReturn(run func(ctx context.Context, _c.Call.Return(run) return _c } - -// MockStore_Sync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sync' -type MockStore_Sync_Call struct { - *mock.Call -} - -// Sync is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockStore_Expecter) Sync(ctx interface{}) *MockStore_Sync_Call { - return &MockStore_Sync_Call{Call: _e.mock.On("Sync", ctx)} -} - -func (_c *MockStore_Sync_Call) Run(run func(ctx context.Context)) *MockStore_Sync_Call { - _c.Call.Run(func(args mock.Arguments) { - var arg0 context.Context - if args[0] != nil { - arg0 = args[0].(context.Context) - } - run( - arg0, - ) - }) - return _c -} - -func (_c *MockStore_Sync_Call) Return(err error) *MockStore_Sync_Call { - _c.Call.Return(err) - return _c -} - -func (_c *MockStore_Sync_Call) RunAndReturn(run func(ctx context.Context) error) *MockStore_Sync_Call { - _c.Call.Return(run) - return _c -} From 9092ede5e876ea56c05809a2c747731cfae6a181 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 30 Jan 2026 17:02:54 +0100 Subject: [PATCH 07/21] add changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d2b8e89a6c..c1d2ff7985 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Improve cache handling when there is a significant backlog of pending headers and data. 
([#3030](https://github.com/evstack/ev-node/pull/3030)) - Decrease MaxBytesSize to `5MB` to increase compatibility with public nodes. ([#3030](https://github.com/evstack/ev-node/pull/3030)) +- Replace `go-header` store by `ev-node` store. This avoid duplication of all blocks in `go-header` and `ev-node` store. Thanks to the cached store from #3030, this should improve p2p performance as well. ## v1.0.0-rc.1 From 9c7488961b4e82653f9f91c87dca3320855881c0 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 30 Jan 2026 17:04:26 +0100 Subject: [PATCH 08/21] updates --- pkg/store/data_store_adapter.go | 5 +++-- pkg/store/header_store_adapter.go | 18 +++--------------- 2 files changed, 6 insertions(+), 17 deletions(-) diff --git a/pkg/store/data_store_adapter.go b/pkg/store/data_store_adapter.go index 4e34de954f..8d1ce0a9b1 100644 --- a/pkg/store/data_store_adapter.go +++ b/pkg/store/data_store_adapter.go @@ -1,6 +1,7 @@ package store import ( + "bytes" "context" "errors" "sync" @@ -187,7 +188,7 @@ func (a *DataStoreAdapter) Get(ctx context.Context, hash header.Hash) (*types.Da // Check pending data - note: this checks data hash, not header hash for _, h := range a.pendingData.Keys() { - if pendingData, ok := a.pendingData.Peek(h); ok && pendingData != nil && bytesEqual(pendingData.Hash(), hash) { + if pendingData, ok := a.pendingData.Peek(h); ok && pendingData != nil && bytes.Equal(pendingData.Hash(), hash) { return pendingData, nil } } @@ -259,7 +260,7 @@ func (a *DataStoreAdapter) Has(ctx context.Context, hash header.Hash) (bool, err // Check pending data for _, h := range a.pendingData.Keys() { - if pendingData, ok := a.pendingData.Peek(h); ok && pendingData != nil && bytesEqual(pendingData.Hash(), hash) { + if pendingData, ok := a.pendingData.Peek(h); ok && pendingData != nil && bytes.Equal(pendingData.Hash(), hash) { return true, nil } } diff --git a/pkg/store/header_store_adapter.go b/pkg/store/header_store_adapter.go index 991ae39b85..6d6701a8c2 100644 --- a/pkg/store/header_store_adapter.go +++ b/pkg/store/header_store_adapter.go @@ -1,6 +1,7 @@ package store import ( + "bytes" "context" "errors" "sync" @@ -190,7 +191,7 @@ func (a *HeaderStoreAdapter) Get(ctx context.Context, hash header.Hash) (*types. // Check pending headers for _, h := range a.pendingHeaders.Keys() { - if pendingHdr, ok := a.pendingHeaders.Peek(h); ok && pendingHdr != nil && bytesEqual(pendingHdr.Hash(), hash) { + if pendingHdr, ok := a.pendingHeaders.Peek(h); ok && pendingHdr != nil && bytes.Equal(pendingHdr.Hash(), hash) { return pendingHdr, nil } } @@ -262,7 +263,7 @@ func (a *HeaderStoreAdapter) Has(ctx context.Context, hash header.Hash) (bool, e // Check pending headers for _, h := range a.pendingHeaders.Keys() { - if pendingHdr, ok := a.pendingHeaders.Peek(h); ok && pendingHdr != nil && bytesEqual(pendingHdr.Hash(), hash) { + if pendingHdr, ok := a.pendingHeaders.Peek(h); ok && pendingHdr != nil && bytes.Equal(pendingHdr.Hash(), hash) { return true, nil } } @@ -403,16 +404,3 @@ func (a *HeaderStoreAdapter) DeleteRange(ctx context.Context, from, to uint64) e func (a *HeaderStoreAdapter) OnDelete(fn func(context.Context, uint64) error) { a.onDeleteFn = fn } - -// bytesEqual compares two byte slices for equality. 
-func bytesEqual(a, b []byte) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} From dee6ed58428cd0dd6cacc74b894b08f87462649c Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 30 Jan 2026 17:15:09 +0100 Subject: [PATCH 09/21] use generic and improve tail --- pkg/store/data_store_adapter.go | 403 ------------------- pkg/store/data_store_adapter_test.go | 64 +-- pkg/store/header_store_adapter.go | 406 ------------------- pkg/store/header_store_adapter_test.go | 58 +-- pkg/store/store_adapter.go | 513 +++++++++++++++++++++++++ pkg/sync/sync_service.go | 4 +- 6 files changed, 586 insertions(+), 862 deletions(-) delete mode 100644 pkg/store/data_store_adapter.go delete mode 100644 pkg/store/header_store_adapter.go create mode 100644 pkg/store/store_adapter.go diff --git a/pkg/store/data_store_adapter.go b/pkg/store/data_store_adapter.go deleted file mode 100644 index 8d1ce0a9b1..0000000000 --- a/pkg/store/data_store_adapter.go +++ /dev/null @@ -1,403 +0,0 @@ -package store - -import ( - "bytes" - "context" - "errors" - "sync" - "sync/atomic" - - "github.com/celestiaorg/go-header" - lru "github.com/hashicorp/golang-lru/v2" - - "github.com/evstack/ev-node/types" -) - -// DataStoreAdapter wraps Store to implement header.Store[*types.Data]. -// This allows the ev-node store to be used directly by go-header's P2P infrastructure, -// eliminating the need for a separate go-header store and reducing data duplication. -// -// The adapter maintains an in-memory cache for data received via P2P (through Append). -// This cache allows the go-header syncer and P2P handler to access data before it -// is validated and persisted by the ev-node syncer. Once the ev-node syncer processes -// a block, it writes to the underlying store, and subsequent reads will come from the store. -type DataStoreAdapter struct { - store Store - - // height caches the current height to avoid repeated context-based lookups. - // Updated on successful reads and writes. - height atomic.Uint64 - - // mu protects initialization state - mu sync.RWMutex - initialized bool - - // pendingData is an LRU cache for data received via Append that hasn't been - // written to the store yet. Keyed by height. Using LRU prevents unbounded growth. - pendingData *lru.Cache[uint64, *types.Data] - - // onDeleteFn is called when data is deleted (for rollback scenarios) - onDeleteFn func(context.Context, uint64) error -} - -// Compile-time check that DataStoreAdapter implements header.Store -var _ header.Store[*types.Data] = (*DataStoreAdapter)(nil) - -// NewDataStoreAdapter creates a new DataStoreAdapter wrapping the given store. -func NewDataStoreAdapter(store Store) *DataStoreAdapter { - // Create LRU cache for pending data - ignore error as size is constant and valid - pendingCache, _ := lru.New[uint64, *types.Data](defaultPendingCacheSize) - - adapter := &DataStoreAdapter{ - store: store, - pendingData: pendingCache, - } - - // Initialize height from store - if h, err := store.Height(context.Background()); err == nil && h > 0 { - adapter.height.Store(h) - adapter.initialized = true - } - - return adapter -} - -// Start implements header.Store. It initializes the adapter if needed. 
-func (a *DataStoreAdapter) Start(ctx context.Context) error { - a.mu.Lock() - defer a.mu.Unlock() - - // Refresh height from store - h, err := a.store.Height(ctx) - if err != nil { - return err - } - - if h > 0 { - a.height.Store(h) - a.initialized = true - } - - return nil -} - -// Stop implements header.Store. No-op since the underlying store lifecycle -// is managed separately. -func (a *DataStoreAdapter) Stop(ctx context.Context) error { - return nil -} - -// Head returns the data for the highest block in the store. -func (a *DataStoreAdapter) Head(ctx context.Context, _ ...header.HeadOption[*types.Data]) (*types.Data, error) { - // First check the store height - storeHeight, err := a.store.Height(ctx) - if err != nil && storeHeight == 0 { - // Check pending data - if a.pendingData.Len() == 0 { - return nil, header.ErrNotFound - } - - // Find the highest pending data - var maxHeight uint64 - var head *types.Data - for _, h := range a.pendingData.Keys() { - if d, ok := a.pendingData.Peek(h); ok && h > maxHeight { - maxHeight = h - head = d - } - } - if head != nil { - return head, nil - } - return nil, header.ErrNotFound - } - - // Check if we have higher pending data - var maxPending uint64 - var pendingHead *types.Data - for _, h := range a.pendingData.Keys() { - if d, ok := a.pendingData.Peek(h); ok && h > maxPending { - maxPending = h - pendingHead = d - } - } - - if maxPending > storeHeight && pendingHead != nil { - a.height.Store(maxPending) - return pendingHead, nil - } - - if storeHeight == 0 { - return nil, header.ErrNotFound - } - - a.height.Store(storeHeight) - _, data, err := a.store.GetBlockData(ctx, storeHeight) - if err != nil { - return nil, header.ErrNotFound - } - - return data, nil -} - -// Tail returns the data for the lowest block in the store. -// For ev-node, this is typically the genesis/initial height. -func (a *DataStoreAdapter) Tail(ctx context.Context) (*types.Data, error) { - height := a.height.Load() - if height == 0 { - // Check store - h, err := a.store.Height(ctx) - if err != nil || h == 0 { - return nil, header.ErrNotFound - } - height = h - } - - // Try height 1 first (most common case) - _, data, err := a.store.GetBlockData(ctx, 1) - if err == nil { - return data, nil - } - - // Check pending for height 1 - if pendingData, ok := a.pendingData.Peek(1); ok { - return pendingData, nil - } - - // Linear scan from 1 to current height to find first data - for h := uint64(2); h <= height; h++ { - _, data, err = a.store.GetBlockData(ctx, h) - if err == nil { - return data, nil - } - if pendingData, ok := a.pendingData.Peek(h); ok { - return pendingData, nil - } - } - - return nil, header.ErrNotFound -} - -// Get returns data by its hash. -func (a *DataStoreAdapter) Get(ctx context.Context, hash header.Hash) (*types.Data, error) { - // First try the store - _, data, err := a.store.GetBlockByHash(ctx, hash) - if err == nil { - return data, nil - } - - // Check pending data - note: this checks data hash, not header hash - for _, h := range a.pendingData.Keys() { - if pendingData, ok := a.pendingData.Peek(h); ok && pendingData != nil && bytes.Equal(pendingData.Hash(), hash) { - return pendingData, nil - } - } - - return nil, header.ErrNotFound -} - -// GetByHeight returns data at the given height. 
-func (a *DataStoreAdapter) GetByHeight(ctx context.Context, height uint64) (*types.Data, error) { - // First try the store - _, data, err := a.store.GetBlockData(ctx, height) - if err == nil { - return data, nil - } - - // Check pending data - if pendingData, ok := a.pendingData.Peek(height); ok { - return pendingData, nil - } - - return nil, header.ErrNotFound -} - -// GetRangeByHeight returns data in the range [from.Height()+1, to). -// This follows go-header's convention where 'from' is the trusted data -// and we return data starting from the next height. -func (a *DataStoreAdapter) GetRangeByHeight(ctx context.Context, from *types.Data, to uint64) ([]*types.Data, error) { - if from == nil { - return nil, errors.New("from data cannot be nil") - } - - startHeight := from.Height() + 1 - if startHeight >= to { - return nil, nil - } - - return a.GetRange(ctx, startHeight, to) -} - -// GetRange returns data in the range [from, to). -func (a *DataStoreAdapter) GetRange(ctx context.Context, from, to uint64) ([]*types.Data, error) { - if from >= to { - return nil, nil - } - - dataList := make([]*types.Data, 0, to-from) - for height := from; height < to; height++ { - data, err := a.GetByHeight(ctx, height) - if err != nil { - // Return what we have so far - if len(dataList) > 0 { - return dataList, nil - } - return nil, header.ErrNotFound - } - dataList = append(dataList, data) - } - - return dataList, nil -} - -// Has checks if data with the given hash exists. -func (a *DataStoreAdapter) Has(ctx context.Context, hash header.Hash) (bool, error) { - // Check store first - _, _, err := a.store.GetBlockByHash(ctx, hash) - if err == nil { - return true, nil - } - - // Check pending data - for _, h := range a.pendingData.Keys() { - if pendingData, ok := a.pendingData.Peek(h); ok && pendingData != nil && bytes.Equal(pendingData.Hash(), hash) { - return true, nil - } - } - - return false, nil -} - -// HasAt checks if data exists at the given height. -func (a *DataStoreAdapter) HasAt(ctx context.Context, height uint64) bool { - // Check store first - _, _, err := a.store.GetBlockData(ctx, height) - if err == nil { - return true - } - - // Check pending data - return a.pendingData.Contains(height) -} - -// Height returns the current height of the store. -func (a *DataStoreAdapter) Height() uint64 { - // Check store first - if h, err := a.store.Height(context.Background()); err == nil && h > 0 { - // Also check pending for higher heights - maxPending := uint64(0) - for _, height := range a.pendingData.Keys() { - if height > maxPending { - maxPending = height - } - } - - if maxPending > h { - a.height.Store(maxPending) - return maxPending - } - a.height.Store(h) - return h - } - - // Fall back to cached height or check pending - height := a.height.Load() - if height > 0 { - return height - } - - for _, h := range a.pendingData.Keys() { - if h > height { - height = h - } - } - return height -} - -// Append stores data in the pending cache. -// This data is received via P2P and will be available for retrieval -// until the ev-node syncer processes and persists it to the store. 
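The half-open range convention used by GetRangeByHeight and GetRange is easy to get wrong. A tiny sketch of the bound computation, under the assumption that `to` is always exclusive and `from` is the trusted, already-held item:

```go
// Sketch of the go-header range convention: given a trusted item at fromHeight,
// fetch the half-open range [fromHeight+1, to).
package main

import "fmt"

func rangeBounds(fromHeight, to uint64) (start, end uint64, empty bool) {
	start = fromHeight + 1
	if start >= to {
		return 0, 0, true // nothing to fetch
	}
	return start, to, false
}

func main() {
	start, end, empty := rangeBounds(10, 15)
	fmt.Println(start, end, empty) // 11 15 false — heights 11, 12, 13, 14

	_, _, empty = rangeBounds(10, 11)
	fmt.Println(empty) // true — `to` is exclusive, so nothing lies between 10 and 11
}
```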
-func (a *DataStoreAdapter) Append(ctx context.Context, dataList ...*types.Data) error { - if len(dataList) == 0 { - return nil - } - - for _, data := range dataList { - if data == nil || data.IsZero() { - continue - } - - height := data.Height() - - // Check if already in store - if _, _, err := a.store.GetBlockData(ctx, height); err == nil { - // Already persisted, skip - continue - } - - // Add to pending cache (LRU will evict oldest if full) - a.pendingData.Add(height, data) - - // Update cached height - if height > a.height.Load() { - a.height.Store(height) - } - } - - return nil -} - -// Init initializes the store with the first data. -// This is called by go-header when bootstrapping the store with trusted data. -func (a *DataStoreAdapter) Init(ctx context.Context, d *types.Data) error { - a.mu.Lock() - defer a.mu.Unlock() - - if a.initialized { - return nil - } - - if d == nil || d.IsZero() { - return nil - } - - // Add to pending cache (LRU will evict oldest if full) - a.pendingData.Add(d.Height(), d) - a.height.Store(d.Height()) - a.initialized = true - - return nil -} - -// Sync ensures all pending writes are flushed. -// No-op for the adapter as pending data is in-memory cache. -func (a *DataStoreAdapter) Sync(ctx context.Context) error { - return nil -} - -// DeleteRange deletes data in the range [from, to). -// This is used for rollback operations. -func (a *DataStoreAdapter) DeleteRange(ctx context.Context, from, to uint64) error { - // Remove from pending cache - for height := from; height < to; height++ { - a.pendingData.Remove(height) - - if a.onDeleteFn != nil { - if err := a.onDeleteFn(ctx, height); err != nil { - return err - } - } - } - - // Update cached height if necessary - if from <= a.height.Load() { - a.height.Store(from - 1) - } - - return nil -} - -// OnDelete registers a callback to be invoked when data is deleted. -func (a *DataStoreAdapter) OnDelete(fn func(context.Context, uint64) error) { - a.onDeleteFn = fn -} diff --git a/pkg/store/data_store_adapter_test.go b/pkg/store/data_store_adapter_test.go index 2bc3ff5c3a..1b71a93ff5 100644 --- a/pkg/store/data_store_adapter_test.go +++ b/pkg/store/data_store_adapter_test.go @@ -10,9 +10,19 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/types" ) +// testGenesisData returns a genesis with InitialHeight=1 for use in data adapter tests. +func testGenesisData() genesis.Genesis { + return genesis.Genesis{ + ChainID: "test-chain", + InitialHeight: 1, + StartTime: time.Now(), + } +} + // computeDataIndexHash computes the hash used for indexing in the store. // The store indexes by sha256(signedHeader.MarshalBinary()), so for data tests // we need to use the header hash from the saved block. 
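A hedged sketch of the index-hash computation the test helper comment describes: the store keys blocks by a SHA-256 digest of the header's binary encoding rather than by the header's own Hash(). The binaryMarshaler interface and fakeHeader type below are placeholders, not ev-node types.

```go
// Minimal sketch of computing an index hash as sha256 over a binary encoding.
package main

import (
	"crypto/sha256"
	"fmt"
)

type binaryMarshaler interface {
	MarshalBinary() ([]byte, error)
}

// indexHash hashes the marshaled form, mirroring the indexing scheme described
// in the test helper comment above.
func indexHash(h binaryMarshaler) ([]byte, error) {
	raw, err := h.MarshalBinary()
	if err != nil {
		return nil, err
	}
	sum := sha256.Sum256(raw)
	return sum[:], nil
}

type fakeHeader struct{ payload []byte }

func (f fakeHeader) MarshalBinary() ([]byte, error) { return f.payload, nil }

func main() {
	hash, _ := indexHash(fakeHeader{payload: []byte("header-bytes")})
	fmt.Printf("%x\n", hash)
}
```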
@@ -30,7 +40,7 @@ func TestDataStoreAdapter_NewDataStoreAdapter(t *testing.T) { require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) require.NotNil(t, adapter) // Initially, height should be 0 @@ -48,7 +58,7 @@ func TestDataStoreAdapter_AppendAndRetrieve(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Create test data _, d1 := types.GetRandomBlock(1, 2, "test-chain") @@ -93,7 +103,7 @@ func TestDataStoreAdapter_GetFromStore(t *testing.T) { require.NoError(t, batch.Commit()) // Create adapter after data is in store - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Get by hash - need to use the index hash (sha256 of marshaled SignedHeader) hash := computeDataIndexHash(h1) @@ -122,7 +132,7 @@ func TestDataStoreAdapter_Has(t *testing.T) { require.NoError(t, batch.Commit()) // Create adapter after data is in store - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Has should return true for existing data - use index hash has, err := adapter.Has(ctx, computeDataIndexHash(h1)) @@ -142,7 +152,7 @@ func TestDataStoreAdapter_HasAt(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) _, d1 := types.GetRandomBlock(1, 2, "test-chain") require.NoError(t, adapter.Append(ctx, d1)) @@ -170,7 +180,7 @@ func TestDataStoreAdapter_HasAtFromStore(t *testing.T) { require.NoError(t, batch.SetHeight(1)) require.NoError(t, batch.Commit()) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // HasAt should return true for stored height assert.True(t, adapter.HasAt(ctx, 1)) @@ -186,7 +196,7 @@ func TestDataStoreAdapter_GetRange(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Create and append multiple data blocks to pending _, d1 := types.GetRandomBlock(1, 1, "test-chain") @@ -214,7 +224,7 @@ func TestDataStoreAdapter_GetRangeByHeight(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) _, d1 := types.GetRandomBlock(1, 1, "test-chain") _, d2 := types.GetRandomBlock(2, 1, "test-chain") @@ -236,7 +246,7 @@ func TestDataStoreAdapter_Init(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) _, d1 := types.GetRandomBlock(1, 1, "test-chain") @@ -265,7 +275,7 @@ func TestDataStoreAdapter_Tail(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Tail on empty store should return ErrNotFound _, err = adapter.Tail(ctx) @@ -297,7 +307,7 @@ func TestDataStoreAdapter_TailFromStore(t *testing.T) { require.NoError(t, batch.SetHeight(1)) require.NoError(t, batch.Commit()) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, 
testGenesisData()) // Tail should return the first data from store tail, err := adapter.Tail(ctx) @@ -312,7 +322,7 @@ func TestDataStoreAdapter_StartStop(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Start should not error err = adapter.Start(ctx) @@ -330,7 +340,7 @@ func TestDataStoreAdapter_DeleteRange(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) _, d1 := types.GetRandomBlock(1, 1, "test-chain") _, d2 := types.GetRandomBlock(2, 1, "test-chain") @@ -361,7 +371,7 @@ func TestDataStoreAdapter_OnDelete(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) _, d1 := types.GetRandomBlock(1, 1, "test-chain") _, d2 := types.GetRandomBlock(2, 1, "test-chain") @@ -396,7 +406,7 @@ func TestDataStoreAdapter_AppendSkipsExisting(t *testing.T) { require.NoError(t, batch.SetHeight(1)) require.NoError(t, batch.Commit()) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Append the same data again should not error (skips existing in store) err = adapter.Append(ctx, d1) @@ -413,7 +423,7 @@ func TestDataStoreAdapter_AppendNilData(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Append with nil and empty should not error err = adapter.Append(ctx) @@ -433,7 +443,7 @@ func TestDataStoreAdapter_Sync(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Sync should not error err = adapter.Sync(ctx) @@ -457,7 +467,7 @@ func TestDataStoreAdapter_HeightRefreshFromStore(t *testing.T) { require.NoError(t, batch.Commit()) // Create adapter - it should pick up the height from store - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) assert.Equal(t, uint64(1), adapter.Height()) } @@ -468,7 +478,7 @@ func TestDataStoreAdapter_GetByHeightNotFound(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) _, err = adapter.GetByHeight(ctx, 999) assert.ErrorIs(t, err, header.ErrNotFound) @@ -481,7 +491,7 @@ func TestDataStoreAdapter_InitWithNil(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Init with nil should not error but also not mark as initialized err = adapter.Init(ctx, nil) @@ -498,7 +508,7 @@ func TestDataStoreAdapter_ContextTimeout(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Create a context that's already canceled ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) @@ -520,7 +530,7 @@ func TestDataStoreAdapter_GetRangePartial(t *testing.T) { ds, err := 
NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Only append data for heights 1 and 2, not 3 _, d1 := types.GetRandomBlock(1, 1, "test-chain") @@ -542,7 +552,7 @@ func TestDataStoreAdapter_GetRangeEmpty(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // GetRange on empty store should return ErrNotFound _, err = adapter.GetRange(ctx, 1, 5) @@ -556,7 +566,7 @@ func TestDataStoreAdapter_MultipleAppends(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Append data in multiple batches _, d1 := types.GetRandomBlock(1, 1, "test-chain") @@ -584,7 +594,7 @@ func TestDataStoreAdapter_PendingAndStoreInteraction(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Add data to pending _, d1 := types.GetRandomBlock(1, 1, "test-chain") @@ -626,7 +636,7 @@ func TestDataStoreAdapter_HeadPrefersPending(t *testing.T) { require.NoError(t, batch.SetHeight(1)) require.NoError(t, batch.Commit()) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Add height 2 to pending _, d2 := types.GetRandomBlock(2, 1, "test-chain") @@ -645,7 +655,7 @@ func TestDataStoreAdapter_GetFromPendingByHash(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewDataStoreAdapter(store) + adapter := NewDataStoreAdapter(store, testGenesisData()) // Add data to pending _, d1 := types.GetRandomBlock(1, 1, "test-chain") diff --git a/pkg/store/header_store_adapter.go b/pkg/store/header_store_adapter.go deleted file mode 100644 index 6d6701a8c2..0000000000 --- a/pkg/store/header_store_adapter.go +++ /dev/null @@ -1,406 +0,0 @@ -package store - -import ( - "bytes" - "context" - "errors" - "sync" - "sync/atomic" - - "github.com/celestiaorg/go-header" - lru "github.com/hashicorp/golang-lru/v2" - - "github.com/evstack/ev-node/types" -) - -// defaultPendingCacheSize is the default size for the pending headers/data LRU cache. -const defaultPendingCacheSize = 1000 - -// HeaderStoreAdapter wraps Store to implement header.Store[*types.SignedHeader]. -// This allows the ev-node store to be used directly by go-header's P2P infrastructure, -// eliminating the need for a separate go-header store and reducing data duplication. -// -// The adapter maintains an in-memory cache for headers received via P2P (through Append). -// This cache allows the go-header syncer and P2P handler to access headers before they -// are validated and persisted by the ev-node syncer. Once the ev-node syncer processes -// a block, it writes to the underlying store, and subsequent reads will come from the store. -type HeaderStoreAdapter struct { - store Store - - // height caches the current height to avoid repeated context-based lookups. - // Updated on successful reads and writes. - height atomic.Uint64 - - // mu protects initialization state - mu sync.RWMutex - initialized bool - - // pendingHeaders is an LRU cache for headers received via Append that haven't been - // written to the store yet. Keyed by height. 
Using LRU prevents unbounded growth. - pendingHeaders *lru.Cache[uint64, *types.SignedHeader] - - // onDeleteFn is called when headers are deleted (for rollback scenarios) - onDeleteFn func(context.Context, uint64) error -} - -// Compile-time check that HeaderStoreAdapter implements header.Store -var _ header.Store[*types.SignedHeader] = (*HeaderStoreAdapter)(nil) - -// NewHeaderStoreAdapter creates a new HeaderStoreAdapter wrapping the given store. -func NewHeaderStoreAdapter(store Store) *HeaderStoreAdapter { - // Create LRU cache for pending headers - ignore error as size is constant and valid - pendingCache, _ := lru.New[uint64, *types.SignedHeader](defaultPendingCacheSize) - - adapter := &HeaderStoreAdapter{ - store: store, - pendingHeaders: pendingCache, - } - - // Initialize height from store - if h, err := store.Height(context.Background()); err == nil && h > 0 { - adapter.height.Store(h) - adapter.initialized = true - } - - return adapter -} - -// Start implements header.Store. It initializes the adapter if needed. -func (a *HeaderStoreAdapter) Start(ctx context.Context) error { - a.mu.Lock() - defer a.mu.Unlock() - - // Refresh height from store - h, err := a.store.Height(ctx) - if err != nil { - return err - } - - if h > 0 { - a.height.Store(h) - a.initialized = true - } - - return nil -} - -// Stop implements header.Store. No-op since the underlying store lifecycle -// is managed separately. -func (a *HeaderStoreAdapter) Stop(ctx context.Context) error { - return nil -} - -// Head returns the highest header in the store. -func (a *HeaderStoreAdapter) Head(ctx context.Context, _ ...header.HeadOption[*types.SignedHeader]) (*types.SignedHeader, error) { - // First check the store height - storeHeight, err := a.store.Height(ctx) - if err != nil && storeHeight == 0 { - // Check pending headers - if a.pendingHeaders.Len() == 0 { - return nil, header.ErrNotFound - } - - // Find the highest pending header - var maxHeight uint64 - var head *types.SignedHeader - for _, h := range a.pendingHeaders.Keys() { - if hdr, ok := a.pendingHeaders.Peek(h); ok && h > maxHeight { - maxHeight = h - head = hdr - } - } - if head != nil { - return head, nil - } - return nil, header.ErrNotFound - } - - // Check if we have a higher pending header - var maxPending uint64 - var pendingHead *types.SignedHeader - for _, h := range a.pendingHeaders.Keys() { - if hdr, ok := a.pendingHeaders.Peek(h); ok && h > maxPending { - maxPending = h - pendingHead = hdr - } - } - - if maxPending > storeHeight && pendingHead != nil { - a.height.Store(maxPending) - return pendingHead, nil - } - - if storeHeight == 0 { - return nil, header.ErrNotFound - } - - a.height.Store(storeHeight) - hdr, err := a.store.GetHeader(ctx, storeHeight) - if err != nil { - return nil, header.ErrNotFound - } - - return hdr, nil -} - -// Tail returns the lowest header in the store. -// For ev-node, this is typically the genesis/initial height. 
-func (a *HeaderStoreAdapter) Tail(ctx context.Context) (*types.SignedHeader, error) { - height := a.height.Load() - if height == 0 { - // Check store - h, err := a.store.Height(ctx) - if err != nil || h == 0 { - return nil, header.ErrNotFound - } - height = h - } - - // Try height 1 first (most common case) - hdr, err := a.store.GetHeader(ctx, 1) - if err == nil { - return hdr, nil - } - - // Check pending for height 1 - if pendingHdr, ok := a.pendingHeaders.Peek(1); ok { - return pendingHdr, nil - } - - // Linear scan from 1 to current height to find first header - for h := uint64(2); h <= height; h++ { - hdr, err = a.store.GetHeader(ctx, h) - if err == nil { - return hdr, nil - } - if pendingHdr, ok := a.pendingHeaders.Peek(h); ok { - return pendingHdr, nil - } - } - - return nil, header.ErrNotFound -} - -// Get returns a header by its hash. -func (a *HeaderStoreAdapter) Get(ctx context.Context, hash header.Hash) (*types.SignedHeader, error) { - // First try the store - hdr, _, err := a.store.GetBlockByHash(ctx, hash) - if err == nil { - return hdr, nil - } - - // Check pending headers - for _, h := range a.pendingHeaders.Keys() { - if pendingHdr, ok := a.pendingHeaders.Peek(h); ok && pendingHdr != nil && bytes.Equal(pendingHdr.Hash(), hash) { - return pendingHdr, nil - } - } - - return nil, header.ErrNotFound -} - -// GetByHeight returns a header at the given height. -func (a *HeaderStoreAdapter) GetByHeight(ctx context.Context, height uint64) (*types.SignedHeader, error) { - // First try the store - hdr, err := a.store.GetHeader(ctx, height) - if err == nil { - return hdr, nil - } - - // Check pending headers - if pendingHdr, ok := a.pendingHeaders.Peek(height); ok { - return pendingHdr, nil - } - - return nil, header.ErrNotFound -} - -// GetRangeByHeight returns headers in the range [from.Height()+1, to). -// This follows go-header's convention where 'from' is the trusted header -// and we return headers starting from the next height. -func (a *HeaderStoreAdapter) GetRangeByHeight(ctx context.Context, from *types.SignedHeader, to uint64) ([]*types.SignedHeader, error) { - if from == nil { - return nil, errors.New("from header cannot be nil") - } - - startHeight := from.Height() + 1 - if startHeight >= to { - return nil, nil - } - - return a.GetRange(ctx, startHeight, to) -} - -// GetRange returns headers in the range [from, to). -func (a *HeaderStoreAdapter) GetRange(ctx context.Context, from, to uint64) ([]*types.SignedHeader, error) { - if from >= to { - return nil, nil - } - - headers := make([]*types.SignedHeader, 0, to-from) - for height := from; height < to; height++ { - hdr, err := a.GetByHeight(ctx, height) - if err != nil { - // Return what we have so far - if len(headers) > 0 { - return headers, nil - } - return nil, header.ErrNotFound - } - headers = append(headers, hdr) - } - - return headers, nil -} - -// Has checks if a header with the given hash exists. -func (a *HeaderStoreAdapter) Has(ctx context.Context, hash header.Hash) (bool, error) { - // Check store first - _, _, err := a.store.GetBlockByHash(ctx, hash) - if err == nil { - return true, nil - } - - // Check pending headers - for _, h := range a.pendingHeaders.Keys() { - if pendingHdr, ok := a.pendingHeaders.Peek(h); ok && pendingHdr != nil && bytes.Equal(pendingHdr.Hash(), hash) { - return true, nil - } - } - - return false, nil -} - -// HasAt checks if a header exists at the given height. 
-func (a *HeaderStoreAdapter) HasAt(ctx context.Context, height uint64) bool { - // Check store first - _, err := a.store.GetHeader(ctx, height) - if err == nil { - return true - } - - // Check pending headers - return a.pendingHeaders.Contains(height) -} - -// Height returns the current height of the store. -func (a *HeaderStoreAdapter) Height() uint64 { - // Check store first - if h, err := a.store.Height(context.Background()); err == nil && h > 0 { - // Also check pending for higher heights - maxPending := uint64(0) - for _, height := range a.pendingHeaders.Keys() { - if height > maxPending { - maxPending = height - } - } - - if maxPending > h { - a.height.Store(maxPending) - return maxPending - } - a.height.Store(h) - return h - } - - // Fall back to cached height or check pending - height := a.height.Load() - if height > 0 { - return height - } - - for _, h := range a.pendingHeaders.Keys() { - if h > height { - height = h - } - } - return height -} - -// Append stores headers in the pending cache. -// These headers are received via P2P and will be available for retrieval -// until the ev-node syncer processes and persists them to the store. -func (a *HeaderStoreAdapter) Append(ctx context.Context, headers ...*types.SignedHeader) error { - if len(headers) == 0 { - return nil - } - - for _, hdr := range headers { - if hdr == nil || hdr.IsZero() { - continue - } - - height := hdr.Height() - - // Check if already in store - if _, err := a.store.GetHeader(ctx, height); err == nil { - // Already persisted, skip - continue - } - - // Add to pending cache (LRU will evict oldest if full) - a.pendingHeaders.Add(height, hdr) - - // Update cached height - if height > a.height.Load() { - a.height.Store(height) - } - } - - return nil -} - -// Init initializes the store with the first header. -// This is called by go-header when bootstrapping the store with a trusted header. -func (a *HeaderStoreAdapter) Init(ctx context.Context, h *types.SignedHeader) error { - a.mu.Lock() - defer a.mu.Unlock() - - if a.initialized { - return nil - } - - if h == nil || h.IsZero() { - return nil - } - - // Add to pending cache (LRU will evict oldest if full) - a.pendingHeaders.Add(h.Height(), h) - a.height.Store(h.Height()) - a.initialized = true - - return nil -} - -// Sync ensures all pending writes are flushed. -// No-op for the adapter as pending data is in-memory cache. -func (a *HeaderStoreAdapter) Sync(ctx context.Context) error { - return nil -} - -// DeleteRange deletes headers in the range [from, to). -// This is used for rollback operations. -func (a *HeaderStoreAdapter) DeleteRange(ctx context.Context, from, to uint64) error { - // Remove from pending cache - for height := from; height < to; height++ { - a.pendingHeaders.Remove(height) - - if a.onDeleteFn != nil { - if err := a.onDeleteFn(ctx, height); err != nil { - return err - } - } - } - - // Update cached height if necessary - if from <= a.height.Load() { - a.height.Store(from - 1) - } - - return nil -} - -// OnDelete registers a callback to be invoked when headers are deleted. 
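A small sketch of the DeleteRange/OnDelete contract described above: heights in the half-open range [from, to) are dropped and an optional callback is invoked per deleted height, which is how rollback hooks in. A map stands in for the pending cache here.

```go
// Illustrative range deletion with a per-height callback; types are placeholders.
package main

import "fmt"

type rangeDeleter struct {
	items    map[uint64]string
	onDelete func(height uint64) error
}

// DeleteRange removes heights in [from, to) and calls onDelete for each one.
func (d *rangeDeleter) DeleteRange(from, to uint64) error {
	for h := from; h < to; h++ {
		delete(d.items, h)
		if d.onDelete != nil {
			if err := d.onDelete(h); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	d := &rangeDeleter{
		items:    map[uint64]string{1: "a", 2: "b", 3: "c"},
		onDelete: func(h uint64) error { fmt.Println("deleted", h); return nil },
	}
	_ = d.DeleteRange(2, 4)   // removes heights 2 and 3, keeps 1
	fmt.Println(len(d.items)) // 1
}
```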
-func (a *HeaderStoreAdapter) OnDelete(fn func(context.Context, uint64) error) { - a.onDeleteFn = fn -} diff --git a/pkg/store/header_store_adapter_test.go b/pkg/store/header_store_adapter_test.go index 3300def67b..d26f51d76e 100644 --- a/pkg/store/header_store_adapter_test.go +++ b/pkg/store/header_store_adapter_test.go @@ -10,9 +10,19 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/types" ) +// testGenesis returns a genesis with InitialHeight=1 for use in tests. +func testGenesis() genesis.Genesis { + return genesis.Genesis{ + ChainID: "test-chain", + InitialHeight: 1, + StartTime: time.Now(), + } +} + // computeHeaderIndexHash computes the hash used for indexing in the store. // The store indexes by sha256(signedHeader.MarshalBinary()), not signedHeader.Hash(). func computeHeaderIndexHash(h *types.SignedHeader) []byte { @@ -29,7 +39,7 @@ func TestHeaderStoreAdapter_NewHeaderStoreAdapter(t *testing.T) { require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) require.NotNil(t, adapter) // Initially, height should be 0 @@ -47,7 +57,7 @@ func TestHeaderStoreAdapter_AppendAndRetrieve(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Create test headers h1, _ := types.GetRandomBlock(1, 2, "test-chain") @@ -92,7 +102,7 @@ func TestHeaderStoreAdapter_GetFromStore(t *testing.T) { require.NoError(t, batch.Commit()) // Create adapter after data is in store - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Get by hash - need to use the index hash (sha256 of marshaled SignedHeader) hash := computeHeaderIndexHash(h1) @@ -121,7 +131,7 @@ func TestHeaderStoreAdapter_Has(t *testing.T) { require.NoError(t, batch.SetHeight(1)) require.NoError(t, batch.Commit()) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Has should return true for existing header - use index hash has, err := adapter.Has(ctx, computeHeaderIndexHash(h1)) @@ -141,7 +151,7 @@ func TestHeaderStoreAdapter_HasAt(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) h1, _ := types.GetRandomBlock(1, 2, "test-chain") require.NoError(t, adapter.Append(ctx, h1)) @@ -169,7 +179,7 @@ func TestHeaderStoreAdapter_HasAtFromStore(t *testing.T) { require.NoError(t, batch.SetHeight(1)) require.NoError(t, batch.Commit()) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // HasAt should return true for stored height assert.True(t, adapter.HasAt(ctx, 1)) @@ -185,7 +195,7 @@ func TestHeaderStoreAdapter_GetRange(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Create and append multiple headers to pending h1, _ := types.GetRandomBlock(1, 1, "test-chain") @@ -213,7 +223,7 @@ func TestHeaderStoreAdapter_GetRangeByHeight(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, 
testGenesis()) h1, _ := types.GetRandomBlock(1, 1, "test-chain") h2, _ := types.GetRandomBlock(2, 1, "test-chain") @@ -235,7 +245,7 @@ func TestHeaderStoreAdapter_Init(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) h1, _ := types.GetRandomBlock(1, 1, "test-chain") @@ -264,7 +274,7 @@ func TestHeaderStoreAdapter_Tail(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Tail on empty store should return ErrNotFound _, err = adapter.Tail(ctx) @@ -296,7 +306,7 @@ func TestHeaderStoreAdapter_TailFromStore(t *testing.T) { require.NoError(t, batch.SetHeight(1)) require.NoError(t, batch.Commit()) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Tail should return the first header from store tail, err := adapter.Tail(ctx) @@ -311,7 +321,7 @@ func TestHeaderStoreAdapter_StartStop(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Start should not error err = adapter.Start(ctx) @@ -329,7 +339,7 @@ func TestHeaderStoreAdapter_DeleteRange(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) h1, _ := types.GetRandomBlock(1, 1, "test-chain") h2, _ := types.GetRandomBlock(2, 1, "test-chain") @@ -360,7 +370,7 @@ func TestHeaderStoreAdapter_OnDelete(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) h1, _ := types.GetRandomBlock(1, 1, "test-chain") h2, _ := types.GetRandomBlock(2, 1, "test-chain") @@ -395,7 +405,7 @@ func TestHeaderStoreAdapter_AppendSkipsExisting(t *testing.T) { require.NoError(t, batch.SetHeight(1)) require.NoError(t, batch.Commit()) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Append the same header again should not error (skips existing in store) err = adapter.Append(ctx, h1) @@ -412,7 +422,7 @@ func TestHeaderStoreAdapter_AppendNilHeaders(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Append with nil and empty should not error err = adapter.Append(ctx) @@ -432,7 +442,7 @@ func TestHeaderStoreAdapter_Sync(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Sync should not error err = adapter.Sync(ctx) @@ -456,7 +466,7 @@ func TestHeaderStoreAdapter_HeightRefreshFromStore(t *testing.T) { require.NoError(t, batch.Commit()) // Create adapter - it should pick up the height from store - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) assert.Equal(t, uint64(1), adapter.Height()) } @@ -467,7 +477,7 @@ func TestHeaderStoreAdapter_GetByHeightNotFound(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := 
NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) _, err = adapter.GetByHeight(ctx, 999) assert.ErrorIs(t, err, header.ErrNotFound) @@ -480,7 +490,7 @@ func TestHeaderStoreAdapter_InitWithNil(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Init with nil should not error but also not mark as initialized err = adapter.Init(ctx, nil) @@ -497,7 +507,7 @@ func TestHeaderStoreAdapter_ContextTimeout(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Create a context that's already canceled ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) @@ -519,7 +529,7 @@ func TestHeaderStoreAdapter_PendingAndStoreInteraction(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Add header to pending h1, _ := types.GetRandomBlock(1, 1, "test-chain") @@ -561,7 +571,7 @@ func TestHeaderStoreAdapter_HeadPrefersPending(t *testing.T) { require.NoError(t, batch.SetHeight(1)) require.NoError(t, batch.Commit()) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Add height 2 to pending h2, _ := types.GetRandomBlock(2, 1, "test-chain") @@ -580,7 +590,7 @@ func TestHeaderStoreAdapter_GetFromPendingByHash(t *testing.T) { ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) - adapter := NewHeaderStoreAdapter(store) + adapter := NewHeaderStoreAdapter(store, testGenesis()) // Add header to pending h1, _ := types.GetRandomBlock(1, 1, "test-chain") diff --git a/pkg/store/store_adapter.go b/pkg/store/store_adapter.go new file mode 100644 index 0000000000..5685843cfd --- /dev/null +++ b/pkg/store/store_adapter.go @@ -0,0 +1,513 @@ +package store + +import ( + "bytes" + "context" + "sync" + "sync/atomic" + + "github.com/celestiaorg/go-header" + lru "github.com/hashicorp/golang-lru/v2" + + "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/types" +) + +// defaultPendingCacheSize is the default size for the pending headers/data LRU cache. +const defaultPendingCacheSize = 1000 + +// StoreGetter abstracts the store access methods for different types (headers vs data). +type StoreGetter[H header.Header[H]] interface { + // GetByHeight retrieves an item by its height. + GetByHeight(ctx context.Context, height uint64) (H, error) + // GetByHash retrieves an item by its hash. + GetByHash(ctx context.Context, hash []byte) (H, error) + // Height returns the current height of the store. + Height(ctx context.Context) (uint64, error) + // HasAt checks if an item exists at the given height. + HasAt(ctx context.Context, height uint64) bool +} + +// StoreAdapter is a generic adapter that wraps Store to implement header.Store[H]. +// This allows the ev-node store to be used directly by go-header's P2P infrastructure, +// eliminating the need for a separate go-header store and reducing data duplication. +// +// The adapter maintains an in-memory cache for items received via P2P (through Append). +// This cache allows the go-header syncer and P2P handler to access items before they +// are validated and persisted by the ev-node syncer. 
Once the ev-node syncer processes +// a block, it writes to the underlying store, and subsequent reads will come from the store. +type StoreAdapter[H header.Header[H]] struct { + getter StoreGetter[H] + genesis genesis.Genesis + + // height caches the current height to avoid repeated context-based lookups. + // Updated on successful reads and writes. + height atomic.Uint64 + + // mu protects initialization state + mu sync.RWMutex + initialized bool + + // pending is an LRU cache for items received via Append that haven't been + // written to the store yet. Keyed by height. Using LRU prevents unbounded growth. + pending *lru.Cache[uint64, H] + + // onDeleteFn is called when items are deleted (for rollback scenarios) + onDeleteFn func(context.Context, uint64) error +} + +// NewStoreAdapter creates a new StoreAdapter wrapping the given store getter. +// The genesis is used to determine the initial height for efficient Tail lookups. +func NewStoreAdapter[H header.Header[H]](getter StoreGetter[H], gen genesis.Genesis) *StoreAdapter[H] { + // Create LRU cache for pending items - ignore error as size is constant and valid + pendingCache, _ := lru.New[uint64, H](defaultPendingCacheSize) + + adapter := &StoreAdapter[H]{ + getter: getter, + genesis: gen, + pending: pendingCache, + } + + // Initialize height from store + if h, err := getter.Height(context.Background()); err == nil && h > 0 { + adapter.height.Store(h) + adapter.initialized = true + } + + return adapter +} + +// Start implements header.Store. It initializes the adapter if needed. +func (a *StoreAdapter[H]) Start(ctx context.Context) error { + a.mu.Lock() + defer a.mu.Unlock() + + // Refresh height from store + h, err := a.getter.Height(ctx) + if err != nil { + return err + } + + if h > 0 { + a.height.Store(h) + a.initialized = true + } + + return nil +} + +// Stop implements header.Store. No-op since the underlying store lifecycle +// is managed separately. +func (a *StoreAdapter[H]) Stop(ctx context.Context) error { + return nil +} + +// Head returns the highest item in the store. +func (a *StoreAdapter[H]) Head(ctx context.Context, _ ...header.HeadOption[H]) (H, error) { + var zero H + + // First check the store height + storeHeight, err := a.getter.Height(ctx) + if err != nil && storeHeight == 0 { + // Check pending items + if a.pending.Len() == 0 { + return zero, header.ErrNotFound + } + + // Find the highest pending item + var maxHeight uint64 + var head H + for _, h := range a.pending.Keys() { + if item, ok := a.pending.Peek(h); ok && h > maxHeight { + maxHeight = h + head = item + } + } + if maxHeight > 0 { + return head, nil + } + return zero, header.ErrNotFound + } + + // Check if we have a higher pending item + var maxPending uint64 + var pendingHead H + for _, h := range a.pending.Keys() { + if item, ok := a.pending.Peek(h); ok && h > maxPending { + maxPending = h + pendingHead = item + } + } + + if maxPending > storeHeight && maxPending > 0 { + a.height.Store(maxPending) + return pendingHead, nil + } + + if storeHeight == 0 { + return zero, header.ErrNotFound + } + + a.height.Store(storeHeight) + item, err := a.getter.GetByHeight(ctx, storeHeight) + if err != nil { + return zero, header.ErrNotFound + } + + return item, nil +} + +// Tail returns the lowest item in the store. +// For ev-node, this is typically the genesis/initial height. +// If pruning has occurred, it walks up from initialHeight to find the first available item. +// TODO(@julienrbrt): Optimize this when pruning is enabled. 
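A sketch of the Tail walk-up described in the comment above: start at the genesis initial height and advance until the first retrievable item, which covers the case where early heights were pruned. The lookup function stands in for store/pending access and is hypothetical.

```go
// Illustrative tail search from an initial height upward.
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// tailHeight returns the lowest available height in [initialHeight, head].
func tailHeight(initialHeight, head uint64, lookup func(uint64) error) (uint64, error) {
	if initialHeight == 0 {
		initialHeight = 1 // default when genesis does not set an initial height
	}
	for h := initialHeight; h <= head; h++ {
		if lookup(h) == nil {
			return h, nil
		}
	}
	return 0, errNotFound
}

func main() {
	// Heights 1-3 pruned; 4 is the first block still present.
	present := map[uint64]bool{4: true, 5: true, 6: true}
	lookup := func(h uint64) error {
		if present[h] {
			return nil
		}
		return errNotFound
	}
	h, err := tailHeight(1, 6, lookup)
	fmt.Println(h, err) // 4 <nil>
}
```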
+func (a *StoreAdapter[H]) Tail(ctx context.Context) (H, error) { + var zero H + + height := a.height.Load() + if height == 0 { + // Check store + h, err := a.getter.Height(ctx) + if err != nil || h == 0 { + return zero, header.ErrNotFound + } + height = h + } + + initialHeight := a.genesis.InitialHeight + if initialHeight == 0 { + initialHeight = 1 + } + + // Try initialHeight first (most common case - no pruning) + item, err := a.getter.GetByHeight(ctx, initialHeight) + if err == nil { + return item, nil + } + + // Check pending for initialHeight + if pendingItem, ok := a.pending.Peek(initialHeight); ok { + return pendingItem, nil + } + + // Walk up from initialHeight to find the first available item (pruning case) + for h := initialHeight + 1; h <= height; h++ { + item, err = a.getter.GetByHeight(ctx, h) + if err == nil { + return item, nil + } + if pendingItem, ok := a.pending.Peek(h); ok { + return pendingItem, nil + } + } + + return zero, header.ErrNotFound +} + +// Get returns an item by its hash. +func (a *StoreAdapter[H]) Get(ctx context.Context, hash header.Hash) (H, error) { + var zero H + + // First try the store + item, err := a.getter.GetByHash(ctx, hash) + if err == nil { + return item, nil + } + + // Check pending items + for _, h := range a.pending.Keys() { + if pendingItem, ok := a.pending.Peek(h); ok && !pendingItem.IsZero() && bytes.Equal(pendingItem.Hash(), hash) { + return pendingItem, nil + } + } + + return zero, header.ErrNotFound +} + +// GetByHeight returns an item at the given height. +func (a *StoreAdapter[H]) GetByHeight(ctx context.Context, height uint64) (H, error) { + var zero H + + // First try the store + item, err := a.getter.GetByHeight(ctx, height) + if err == nil { + return item, nil + } + + // Check pending items + if pendingItem, ok := a.pending.Peek(height); ok { + return pendingItem, nil + } + + return zero, header.ErrNotFound +} + +// GetRangeByHeight returns items in the range [from.Height()+1, to). +// This follows go-header's convention where 'from' is the trusted item +// and we return items starting from the next height. +func (a *StoreAdapter[H]) GetRangeByHeight(ctx context.Context, from H, to uint64) ([]H, error) { + if from.IsZero() { + return nil, header.ErrNotFound + } + + startHeight := from.Height() + 1 + if startHeight >= to { + return nil, nil + } + + return a.GetRange(ctx, startHeight, to) +} + +// GetRange returns items in the range [from, to). +func (a *StoreAdapter[H]) GetRange(ctx context.Context, from, to uint64) ([]H, error) { + if from >= to { + return nil, nil + } + + items := make([]H, 0, to-from) + for height := from; height < to; height++ { + item, err := a.GetByHeight(ctx, height) + if err != nil { + // Return what we have so far + if len(items) > 0 { + return items, nil + } + return nil, header.ErrNotFound + } + items = append(items, item) + } + + return items, nil +} + +// Has checks if an item with the given hash exists. +func (a *StoreAdapter[H]) Has(ctx context.Context, hash header.Hash) (bool, error) { + // Check store first + _, err := a.getter.GetByHash(ctx, hash) + if err == nil { + return true, nil + } + + // Check pending items + for _, h := range a.pending.Keys() { + if pendingItem, ok := a.pending.Peek(h); ok && !pendingItem.IsZero() && bytes.Equal(pendingItem.Hash(), hash) { + return true, nil + } + } + + return false, nil +} + +// HasAt checks if an item exists at the given height. 
+func (a *StoreAdapter[H]) HasAt(ctx context.Context, height uint64) bool { + // Check store first + if a.getter.HasAt(ctx, height) { + return true + } + + // Check pending items + return a.pending.Contains(height) +} + +// Height returns the current height of the store. +func (a *StoreAdapter[H]) Height() uint64 { + // Check store first + if h, err := a.getter.Height(context.Background()); err == nil && h > 0 { + // Also check pending for higher heights + maxPending := uint64(0) + for _, height := range a.pending.Keys() { + if height > maxPending { + maxPending = height + } + } + + if maxPending > h { + a.height.Store(maxPending) + return maxPending + } + a.height.Store(h) + return h + } + + // Fall back to cached height or check pending + height := a.height.Load() + if height > 0 { + return height + } + + for _, h := range a.pending.Keys() { + if h > height { + height = h + } + } + return height +} + +// Append stores items in the pending cache. +// These items are received via P2P and will be available for retrieval +// until the ev-node syncer processes and persists them to the store. +func (a *StoreAdapter[H]) Append(ctx context.Context, items ...H) error { + if len(items) == 0 { + return nil + } + + for _, item := range items { + if item.IsZero() { + continue + } + + height := item.Height() + + // Check if already in store + if a.getter.HasAt(ctx, height) { + // Already persisted, skip + continue + } + + // Add to pending cache (LRU will evict oldest if full) + a.pending.Add(height, item) + + // Update cached height + if height > a.height.Load() { + a.height.Store(height) + } + } + + return nil +} + +// Init initializes the store with the first item. +// This is called by go-header when bootstrapping the store with a trusted item. +func (a *StoreAdapter[H]) Init(ctx context.Context, item H) error { + a.mu.Lock() + defer a.mu.Unlock() + + if a.initialized { + return nil + } + + if item.IsZero() { + return nil + } + + // Add to pending cache (LRU will evict oldest if full) + a.pending.Add(item.Height(), item) + a.height.Store(item.Height()) + a.initialized = true + + return nil +} + +// Sync ensures all pending writes are flushed. +// No-op for the adapter as pending data is in-memory cache. +func (a *StoreAdapter[H]) Sync(ctx context.Context) error { + return nil +} + +// DeleteRange deletes items in the range [from, to). +// This is used for rollback operations. +func (a *StoreAdapter[H]) DeleteRange(ctx context.Context, from, to uint64) error { + // Remove from pending cache + for height := from; height < to; height++ { + a.pending.Remove(height) + + if a.onDeleteFn != nil { + if err := a.onDeleteFn(ctx, height); err != nil { + return err + } + } + } + + // Update cached height if necessary + if from <= a.height.Load() { + a.height.Store(from - 1) + } + + return nil +} + +// OnDelete registers a callback to be invoked when items are deleted. +func (a *StoreAdapter[H]) OnDelete(fn func(context.Context, uint64) error) { + a.onDeleteFn = fn +} + +// HeaderStoreGetter implements StoreGetter for *types.SignedHeader. +type HeaderStoreGetter struct { + store Store +} + +// NewHeaderStoreGetter creates a new HeaderStoreGetter. +func NewHeaderStoreGetter(store Store) *HeaderStoreGetter { + return &HeaderStoreGetter{store: store} +} + +// GetByHeight implements StoreGetter. +func (g *HeaderStoreGetter) GetByHeight(ctx context.Context, height uint64) (*types.SignedHeader, error) { + return g.store.GetHeader(ctx, height) +} + +// GetByHash implements StoreGetter. 
+func (g *HeaderStoreGetter) GetByHash(ctx context.Context, hash []byte) (*types.SignedHeader, error) { + hdr, _, err := g.store.GetBlockByHash(ctx, hash) + return hdr, err +} + +// Height implements StoreGetter. +func (g *HeaderStoreGetter) Height(ctx context.Context) (uint64, error) { + return g.store.Height(ctx) +} + +// HasAt implements StoreGetter. +func (g *HeaderStoreGetter) HasAt(ctx context.Context, height uint64) bool { + _, err := g.store.GetHeader(ctx, height) + return err == nil +} + +// DataStoreGetter implements StoreGetter for *types.Data. +type DataStoreGetter struct { + store Store +} + +// NewDataStoreGetter creates a new DataStoreGetter. +func NewDataStoreGetter(store Store) *DataStoreGetter { + return &DataStoreGetter{store: store} +} + +// GetByHeight implements StoreGetter. +func (g *DataStoreGetter) GetByHeight(ctx context.Context, height uint64) (*types.Data, error) { + _, data, err := g.store.GetBlockData(ctx, height) + return data, err +} + +// GetByHash implements StoreGetter. +func (g *DataStoreGetter) GetByHash(ctx context.Context, hash []byte) (*types.Data, error) { + _, data, err := g.store.GetBlockByHash(ctx, hash) + return data, err +} + +// Height implements StoreGetter. +func (g *DataStoreGetter) Height(ctx context.Context) (uint64, error) { + return g.store.Height(ctx) +} + +// HasAt implements StoreGetter. +func (g *DataStoreGetter) HasAt(ctx context.Context, height uint64) bool { + _, _, err := g.store.GetBlockData(ctx, height) + return err == nil +} + +// Type aliases for convenience +type HeaderStoreAdapter = StoreAdapter[*types.SignedHeader] +type DataStoreAdapter = StoreAdapter[*types.Data] + +// NewHeaderStoreAdapter creates a new StoreAdapter for headers. +// The genesis is used to determine the initial height for efficient Tail lookups. +func NewHeaderStoreAdapter(store Store, gen genesis.Genesis) *HeaderStoreAdapter { + return NewStoreAdapter[*types.SignedHeader](NewHeaderStoreGetter(store), gen) +} + +// NewDataStoreAdapter creates a new StoreAdapter for data. +// The genesis is used to determine the initial height for efficient Tail lookups. 
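A simplified illustration of why the getter abstraction lets one generic adapter serve both headers and data: each concrete getter adapts differently shaped store accessors to the same small interface, and generic code on top only sees that interface. The blockStore, headerGetter, and dataGetter below are stand-ins, not ev-node's types.

```go
// Sketch of a getter abstraction shared by two payload types.
package main

import (
	"context"
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

type getter[T any] interface {
	GetByHeight(ctx context.Context, height uint64) (T, error)
	Height(ctx context.Context) (uint64, error)
}

type blockStore struct {
	headers map[uint64]string
	data    map[uint64][]byte
}

type headerGetter struct{ s *blockStore }

func (g headerGetter) GetByHeight(_ context.Context, h uint64) (string, error) {
	if hdr, ok := g.s.headers[h]; ok {
		return hdr, nil
	}
	return "", errNotFound
}

// In this toy example heights are contiguous from 1, so the count is the height.
func (g headerGetter) Height(context.Context) (uint64, error) { return uint64(len(g.s.headers)), nil }

type dataGetter struct{ s *blockStore }

func (g dataGetter) GetByHeight(_ context.Context, h uint64) ([]byte, error) {
	if d, ok := g.s.data[h]; ok {
		return d, nil
	}
	return nil, errNotFound
}

func (g dataGetter) Height(context.Context) (uint64, error) { return uint64(len(g.s.data)), nil }

// head works for any payload type because it only depends on the getter interface.
func head[T any](ctx context.Context, g getter[T]) (T, error) {
	var zero T
	h, err := g.Height(ctx)
	if err != nil || h == 0 {
		return zero, errNotFound
	}
	return g.GetByHeight(ctx, h)
}

func main() {
	s := &blockStore{
		headers: map[uint64]string{1: "h1", 2: "h2"},
		data:    map[uint64][]byte{1: {0x01}, 2: {0x02}},
	}
	hdr, _ := head[string](context.Background(), headerGetter{s})
	d, _ := head[[]byte](context.Background(), dataGetter{s})
	fmt.Println(hdr, d) // h2 [2]
}
```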
+func NewDataStoreAdapter(store Store, gen genesis.Genesis) *DataStoreAdapter { + return NewStoreAdapter[*types.Data](NewDataStoreGetter(store), gen) +} diff --git a/pkg/sync/sync_service.go b/pkg/sync/sync_service.go index 21184a5136..66af9324e5 100644 --- a/pkg/sync/sync_service.go +++ b/pkg/sync/sync_service.go @@ -72,7 +72,7 @@ func NewDataSyncService( p2p *p2p.Client, logger zerolog.Logger, ) (*DataSyncService, error) { - storeAdapter := store.NewDataStoreAdapter(evStore) + storeAdapter := store.NewDataStoreAdapter(evStore, genesis) return newSyncService[*types.Data](storeAdapter, dataSync, conf, genesis, p2p, logger) } @@ -85,7 +85,7 @@ func NewHeaderSyncService( logger zerolog.Logger, ) (*HeaderSyncService, error) { - storeAdapter := store.NewHeaderStoreAdapter(evStore) + storeAdapter := store.NewHeaderStoreAdapter(evStore, genesis) return newSyncService[*types.SignedHeader](storeAdapter, headerSync, conf, genesis, p2p, logger) } From 70dc901bce9431821a003a8a4a65e41bd15f5cb9 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 30 Jan 2026 17:16:58 +0100 Subject: [PATCH 10/21] lint --- pkg/sync/sync_service.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/sync/sync_service.go b/pkg/sync/sync_service.go index 66af9324e5..78e74c31e7 100644 --- a/pkg/sync/sync_service.go +++ b/pkg/sync/sync_service.go @@ -73,7 +73,7 @@ func NewDataSyncService( logger zerolog.Logger, ) (*DataSyncService, error) { storeAdapter := store.NewDataStoreAdapter(evStore, genesis) - return newSyncService[*types.Data](storeAdapter, dataSync, conf, genesis, p2p, logger) + return newSyncService(storeAdapter, dataSync, conf, genesis, p2p, logger) } // NewHeaderSyncService returns a new HeaderSyncService. @@ -84,9 +84,8 @@ func NewHeaderSyncService( p2p *p2p.Client, logger zerolog.Logger, ) (*HeaderSyncService, error) { - storeAdapter := store.NewHeaderStoreAdapter(evStore, genesis) - return newSyncService[*types.SignedHeader](storeAdapter, headerSync, conf, genesis, p2p, logger) + return newSyncService(storeAdapter, headerSync, conf, genesis, p2p, logger) } func newSyncService[H header.Header[H]]( From 8106374da6e562436c67bb9bb5718d60ebaff475 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 30 Jan 2026 17:35:06 +0100 Subject: [PATCH 11/21] simplify code --- apps/evm/cmd/rollback.go | 12 +++---- apps/testapp/cmd/rollback.go | 9 ++--- pkg/store/store_adapter.go | 69 +++++++++++++++--------------------- 3 files changed, 36 insertions(+), 54 deletions(-) diff --git a/apps/evm/cmd/rollback.go b/apps/evm/cmd/rollback.go index 65ceb768cf..3f11ef8d4f 100644 --- a/apps/evm/cmd/rollback.go +++ b/apps/evm/cmd/rollback.go @@ -3,7 +3,6 @@ package cmd import ( "bytes" "context" - "errors" "fmt" "os" @@ -61,14 +60,12 @@ func NewRollbackCmd() *cobra.Command { height = currentHeight - 1 } - var errs error - // rollback ev-node main state // Note: With the unified store approach, the ev-node store is the single source of truth. // The store adapters (HeaderStoreAdapter/DataStoreAdapter) read from this store, // so rolling back the ev-node store automatically affects P2P sync operations. 
if err := evolveStore.Rollback(goCtx, height, !syncNode); err != nil { - errs = errors.Join(errs, fmt.Errorf("failed to rollback ev-node state: %w", err)) + return fmt.Errorf("failed to rollback ev-node state: %w", err) } // rollback execution layer via EngineClient @@ -77,10 +74,9 @@ func NewRollbackCmd() *cobra.Command { cmd.Printf("Warning: failed to create engine client, skipping EL rollback: %v\n", err) } else { if err := engineClient.Rollback(goCtx, height); err != nil { - errs = errors.Join(errs, fmt.Errorf("failed to rollback execution layer: %w", err)) - } else { - cmd.Printf("Rolled back execution layer to height %d\n", height) + return fmt.Errorf("failed to rollback execution layer: %w", err) } + cmd.Printf("Rolled back execution layer to height %d\n", height) } cmd.Printf("Rolled back ev-node state to height %d\n", height) @@ -88,7 +84,7 @@ func NewRollbackCmd() *cobra.Command { fmt.Println("Restart the node with the `--evnode.clear_cache` flag") } - return errs + return nil }, } diff --git a/apps/testapp/cmd/rollback.go b/apps/testapp/cmd/rollback.go index 2af9bb04df..6326f0fd27 100644 --- a/apps/testapp/cmd/rollback.go +++ b/apps/testapp/cmd/rollback.go @@ -2,7 +2,6 @@ package cmd import ( "context" - "errors" "fmt" kvexecutor "github.com/evstack/ev-node/apps/testapp/kv" @@ -62,19 +61,17 @@ func NewRollbackCmd() *cobra.Command { return err } - var errs error - // rollback ev-node main state // Note: With the unified store approach, the ev-node store is the single source of truth. // The store adapters (HeaderStoreAdapter/DataStoreAdapter) read from this store, // so rolling back the ev-node store automatically affects P2P sync operations. if err := evolveStore.Rollback(goCtx, height, !syncNode); err != nil { - errs = errors.Join(errs, fmt.Errorf("failed to rollback ev-node state: %w", err)) + return fmt.Errorf("failed to rollback ev-node state: %w", err) } // rollback execution store if err := executor.Rollback(goCtx, height); err != nil { - errs = errors.Join(errs, fmt.Errorf("rollback failed: %w", err)) + return fmt.Errorf("failed to rollback executor state: %w", err) } fmt.Printf("Rolled back ev-node state to height %d\n", height) @@ -82,7 +79,7 @@ func NewRollbackCmd() *cobra.Command { fmt.Println("Restart the node with the `--evnode.clear_cache` flag") } - return errs + return nil }, } diff --git a/pkg/store/store_adapter.go b/pkg/store/store_adapter.go index 5685843cfd..d5f170d6f3 100644 --- a/pkg/store/store_adapter.go +++ b/pkg/store/store_adapter.go @@ -102,59 +102,48 @@ func (a *StoreAdapter[H]) Stop(ctx context.Context) error { return nil } +// pendingHead returns the highest item in the pending cache and its height. +// Returns zero value and 0 if pending cache is empty. +func (a *StoreAdapter[H]) pendingHead() (H, uint64) { + var maxHeight uint64 + var head H + for _, h := range a.pending.Keys() { + if item, ok := a.pending.Peek(h); ok && h > maxHeight { + maxHeight = h + head = item + } + } + return head, maxHeight +} + // Head returns the highest item in the store. 
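A compact sketch of the Head selection order after this simplification: prefer the pending cache when it is ahead of the persisted store, otherwise serve the store head, and only fall back to pending if the store read fails. In-memory maps stand in for the store and the pending cache; the function name is hypothetical.

```go
// Illustrative head selection between a persistent store and a pending cache.
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

func selectHead(
	storeHeight, pendingHeight uint64,
	fromStore func(uint64) (string, error),
	fromPending func(uint64) (string, bool),
) (string, error) {
	// Pending wins when it is strictly ahead of what has been persisted.
	if pendingHeight > storeHeight {
		if item, ok := fromPending(pendingHeight); ok {
			return item, nil
		}
	}
	// Otherwise serve the persisted head.
	if storeHeight > 0 {
		if item, err := fromStore(storeHeight); err == nil {
			return item, nil
		}
	}
	// Last resort: whatever is pending.
	if item, ok := fromPending(pendingHeight); pendingHeight > 0 && ok {
		return item, nil
	}
	return "", errNotFound
}

func main() {
	store := map[uint64]string{1: "stored-1", 2: "stored-2"}
	pending := map[uint64]string{3: "pending-3"}
	head, _ := selectHead(2, 3,
		func(h uint64) (string, error) {
			if v, ok := store[h]; ok {
				return v, nil
			}
			return "", errNotFound
		},
		func(h uint64) (string, bool) { v, ok := pending[h]; return v, ok },
	)
	fmt.Println(head) // pending-3
}
```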
func (a *StoreAdapter[H]) Head(ctx context.Context, _ ...header.HeadOption[H]) (H, error) { var zero H - // First check the store height - storeHeight, err := a.getter.Height(ctx) - if err != nil && storeHeight == 0 { - // Check pending items - if a.pending.Len() == 0 { - return zero, header.ErrNotFound - } + storeHeight, _ := a.getter.Height(ctx) + pendingHead, pendingHeight := a.pendingHead() - // Find the highest pending item - var maxHeight uint64 - var head H - for _, h := range a.pending.Keys() { - if item, ok := a.pending.Peek(h); ok && h > maxHeight { - maxHeight = h - head = item - } - } - if maxHeight > 0 { - return head, nil - } - return zero, header.ErrNotFound + // Prefer pending if it's higher than store + if pendingHeight > storeHeight { + a.height.Store(pendingHeight) + return pendingHead, nil } - // Check if we have a higher pending item - var maxPending uint64 - var pendingHead H - for _, h := range a.pending.Keys() { - if item, ok := a.pending.Peek(h); ok && h > maxPending { - maxPending = h - pendingHead = item + // Try to get from store + if storeHeight > 0 { + a.height.Store(storeHeight) + if item, err := a.getter.GetByHeight(ctx, storeHeight); err == nil { + return item, nil } } - if maxPending > storeHeight && maxPending > 0 { - a.height.Store(maxPending) + // Fall back to pending if store failed + if pendingHeight > 0 { + a.height.Store(pendingHeight) return pendingHead, nil } - if storeHeight == 0 { - return zero, header.ErrNotFound - } - - a.height.Store(storeHeight) - item, err := a.getter.GetByHeight(ctx, storeHeight) - if err != nil { - return zero, header.ErrNotFound - } - - return item, nil + return zero, header.ErrNotFound } // Tail returns the lowest item in the store. From 25cc36be921d3e0e21ac245424fd50aa4cdd2cf1 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 30 Jan 2026 17:48:40 +0100 Subject: [PATCH 12/21] wait for height --- pkg/store/data_store_adapter_test.go | 22 ++-- pkg/store/header_store_adapter_test.go | 7 +- pkg/store/store_adapter.go | 136 ++++++++++++++++++++++--- 3 files changed, 141 insertions(+), 24 deletions(-) diff --git a/pkg/store/data_store_adapter_test.go b/pkg/store/data_store_adapter_test.go index 1b71a93ff5..a43e7838e9 100644 --- a/pkg/store/data_store_adapter_test.go +++ b/pkg/store/data_store_adapter_test.go @@ -3,6 +3,7 @@ package store import ( "context" "crypto/sha256" + "errors" "testing" "time" @@ -473,7 +474,9 @@ func TestDataStoreAdapter_HeightRefreshFromStore(t *testing.T) { func TestDataStoreAdapter_GetByHeightNotFound(t *testing.T) { t.Parallel() - ctx := context.Background() + // Use a short timeout since GetByHeight now blocks waiting for the height + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() ds, err := NewTestInMemoryKVStore() require.NoError(t, err) @@ -481,7 +484,8 @@ func TestDataStoreAdapter_GetByHeightNotFound(t *testing.T) { adapter := NewDataStoreAdapter(store, testGenesisData()) _, err = adapter.GetByHeight(ctx, 999) - assert.ErrorIs(t, err, header.ErrNotFound) + // GetByHeight now blocks until the height is available or context is canceled + assert.ErrorIs(t, err, context.DeadlineExceeded) } func TestDataStoreAdapter_InitWithNil(t *testing.T) { @@ -525,7 +529,9 @@ func TestDataStoreAdapter_ContextTimeout(t *testing.T) { func TestDataStoreAdapter_GetRangePartial(t *testing.T) { t.Parallel() - ctx := context.Background() + // Use a short timeout since GetByHeight now blocks waiting for the height + ctx, cancel := 
context.WithTimeout(context.Background(), 500*time.Millisecond) + defer cancel() ds, err := NewTestInMemoryKVStore() require.NoError(t, err) @@ -547,16 +553,20 @@ func TestDataStoreAdapter_GetRangePartial(t *testing.T) { func TestDataStoreAdapter_GetRangeEmpty(t *testing.T) { t.Parallel() - ctx := context.Background() + // Use a short timeout since GetByHeight now blocks waiting for the height + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() ds, err := NewTestInMemoryKVStore() require.NoError(t, err) store := New(ds) adapter := NewDataStoreAdapter(store, testGenesisData()) - // GetRange on empty store should return ErrNotFound + // GetRange on empty store will block until context timeout _, err = adapter.GetRange(ctx, 1, 5) - assert.ErrorIs(t, err, header.ErrNotFound) + // GetByHeight now blocks - we may get context.DeadlineExceeded or ErrNotFound depending on timing + assert.True(t, errors.Is(err, context.DeadlineExceeded) || errors.Is(err, header.ErrNotFound), + "expected DeadlineExceeded or ErrNotFound, got: %v", err) } func TestDataStoreAdapter_MultipleAppends(t *testing.T) { diff --git a/pkg/store/header_store_adapter_test.go b/pkg/store/header_store_adapter_test.go index d26f51d76e..bb1a281936 100644 --- a/pkg/store/header_store_adapter_test.go +++ b/pkg/store/header_store_adapter_test.go @@ -472,7 +472,9 @@ func TestHeaderStoreAdapter_HeightRefreshFromStore(t *testing.T) { func TestHeaderStoreAdapter_GetByHeightNotFound(t *testing.T) { t.Parallel() - ctx := context.Background() + // Use a short timeout since GetByHeight now blocks waiting for the height + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() ds, err := NewTestInMemoryKVStore() require.NoError(t, err) @@ -480,7 +482,8 @@ func TestHeaderStoreAdapter_GetByHeightNotFound(t *testing.T) { adapter := NewHeaderStoreAdapter(store, testGenesis()) _, err = adapter.GetByHeight(ctx, 999) - assert.ErrorIs(t, err, header.ErrNotFound) + // GetByHeight now blocks until the height is available or context is canceled + assert.ErrorIs(t, err, context.DeadlineExceeded) } func TestHeaderStoreAdapter_InitWithNil(t *testing.T) { diff --git a/pkg/store/store_adapter.go b/pkg/store/store_adapter.go index d5f170d6f3..1dc46ddea1 100644 --- a/pkg/store/store_adapter.go +++ b/pkg/store/store_adapter.go @@ -3,6 +3,7 @@ package store import ( "bytes" "context" + "errors" "sync" "sync/atomic" @@ -13,6 +14,9 @@ import ( "github.com/evstack/ev-node/types" ) +// errElapsedHeight is returned when the requested height was already stored. +var errElapsedHeight = errors.New("elapsed height") + // defaultPendingCacheSize is the default size for the pending headers/data LRU cache. const defaultPendingCacheSize = 1000 @@ -28,6 +32,77 @@ type StoreGetter[H header.Header[H]] interface { HasAt(ctx context.Context, height uint64) bool } +// heightSub provides a mechanism for waiting on a specific height to be stored. +// This is critical for go-header syncer which expects GetByHeight to block until +// the requested height is available. +type heightSub struct { + height atomic.Uint64 + heightMu sync.Mutex + heightChs map[uint64][]chan struct{} +} + +func newHeightSub(initialHeight uint64) *heightSub { + hs := &heightSub{ + heightChs: make(map[uint64][]chan struct{}), + } + hs.height.Store(initialHeight) + return hs +} + +// Height returns the current height. 
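+// The value is read atomically, so Height is safe to call concurrently with SetHeight.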
+func (hs *heightSub) Height() uint64 { + return hs.height.Load() +} + +// SetHeight updates the current height and notifies any waiters. +func (hs *heightSub) SetHeight(h uint64) { + hs.height.Store(h) + hs.notifyUpTo(h) +} + +// Wait blocks until the given height is reached or context is canceled. +// Returns errElapsedHeight if the height was already reached. +func (hs *heightSub) Wait(ctx context.Context, height uint64) error { + // Fast path: height already reached + if hs.height.Load() >= height { + return errElapsedHeight + } + + hs.heightMu.Lock() + // Double-check after acquiring lock + if hs.height.Load() >= height { + hs.heightMu.Unlock() + return errElapsedHeight + } + + // Create a channel to wait on + ch := make(chan struct{}) + hs.heightChs[height] = append(hs.heightChs[height], ch) + hs.heightMu.Unlock() + + select { + case <-ch: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// notifyUpTo notifies all waiters for heights <= h. +func (hs *heightSub) notifyUpTo(h uint64) { + hs.heightMu.Lock() + defer hs.heightMu.Unlock() + + for height, chs := range hs.heightChs { + if height <= h { + for _, ch := range chs { + close(ch) + } + delete(hs.heightChs, height) + } + } +} + // StoreAdapter is a generic adapter that wraps Store to implement header.Store[H]. // This allows the ev-node store to be used directly by go-header's P2P infrastructure, // eliminating the need for a separate go-header store and reducing data duplication. @@ -37,13 +112,17 @@ type StoreGetter[H header.Header[H]] interface { // are validated and persisted by the ev-node syncer. Once the ev-node syncer processes // a block, it writes to the underlying store, and subsequent reads will come from the store. type StoreAdapter[H header.Header[H]] struct { - getter StoreGetter[H] - genesis genesis.Genesis + getter StoreGetter[H] + initialHeight uint64 // height caches the current height to avoid repeated context-based lookups. // Updated on successful reads and writes. height atomic.Uint64 + // heightSub allows waiting for specific heights to be stored. + // This is required by go-header syncer for blocking GetByHeight. 
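+	// Append and Init call SetHeight on it, releasing readers blocked in GetByHeight.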
+ heightSub *heightSub + // mu protects initialization state mu sync.RWMutex initialized bool @@ -62,15 +141,22 @@ func NewStoreAdapter[H header.Header[H]](getter StoreGetter[H], gen genesis.Gene // Create LRU cache for pending items - ignore error as size is constant and valid pendingCache, _ := lru.New[uint64, H](defaultPendingCacheSize) + // Get initial height from store + initialHeight := gen.InitialHeight + if h, err := getter.Height(context.Background()); err == nil && h > 0 { + initialHeight = h + } + adapter := &StoreAdapter[H]{ - getter: getter, - genesis: gen, - pending: pendingCache, + getter: getter, + initialHeight: initialHeight, + pending: pendingCache, + heightSub: newHeightSub(initialHeight), } // Initialize height from store - if h, err := getter.Height(context.Background()); err == nil && h > 0 { - adapter.height.Store(h) + if initialHeight > 0 { + adapter.height.Store(initialHeight) adapter.initialized = true } @@ -90,6 +176,7 @@ func (a *StoreAdapter[H]) Start(ctx context.Context) error { if h > 0 { a.height.Store(h) + a.heightSub.SetHeight(h) a.initialized = true } @@ -163,24 +250,19 @@ func (a *StoreAdapter[H]) Tail(ctx context.Context) (H, error) { height = h } - initialHeight := a.genesis.InitialHeight - if initialHeight == 0 { - initialHeight = 1 - } - // Try initialHeight first (most common case - no pruning) - item, err := a.getter.GetByHeight(ctx, initialHeight) + item, err := a.getter.GetByHeight(ctx, a.initialHeight) if err == nil { return item, nil } // Check pending for initialHeight - if pendingItem, ok := a.pending.Peek(initialHeight); ok { + if pendingItem, ok := a.pending.Peek(a.initialHeight); ok { return pendingItem, nil } // Walk up from initialHeight to find the first available item (pruning case) - for h := initialHeight + 1; h <= height; h++ { + for h := a.initialHeight + 1; h <= height; h++ { item, err = a.getter.GetByHeight(ctx, h) if err == nil { return item, nil @@ -214,9 +296,29 @@ func (a *StoreAdapter[H]) Get(ctx context.Context, hash header.Hash) (H, error) } // GetByHeight returns an item at the given height. +// If the height is not yet available, it blocks until it is or context is canceled. func (a *StoreAdapter[H]) GetByHeight(ctx context.Context, height uint64) (H, error) { var zero H + // Try to get the item first + if item, err := a.getByHeightNoWait(ctx, height); err == nil { + return item, nil + } + + // If not found, wait for the height to be stored + err := a.heightSub.Wait(ctx, height) + if err != nil && !errors.Is(err, errElapsedHeight) { + return zero, err + } + + // Try again after waiting + return a.getByHeightNoWait(ctx, height) +} + +// getByHeightNoWait returns an item at the given height without blocking. 
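+// GetByHeight calls it before and after waiting on heightSub, so already-stored items are returned immediately.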
+func (a *StoreAdapter[H]) getByHeightNoWait(ctx context.Context, height uint64) (H, error) { + var zero H + // First try the store item, err := a.getter.GetByHeight(ctx, height) if err == nil { @@ -356,9 +458,10 @@ func (a *StoreAdapter[H]) Append(ctx context.Context, items ...H) error { // Add to pending cache (LRU will evict oldest if full) a.pending.Add(height, item) - // Update cached height + // Update cached height and notify waiters if height > a.height.Load() { a.height.Store(height) + a.heightSub.SetHeight(height) } } @@ -382,6 +485,7 @@ func (a *StoreAdapter[H]) Init(ctx context.Context, item H) error { // Add to pending cache (LRU will evict oldest if full) a.pending.Add(item.Height(), item) a.height.Store(item.Height()) + a.heightSub.SetHeight(item.Height()) a.initialized = true return nil From dfe107147ab77e7b60831a3eb0375fa4a7a6cafe Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 30 Jan 2026 17:54:02 +0100 Subject: [PATCH 13/21] cleanup duplicates --- pkg/store/store_adapter.go | 66 +++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 37 deletions(-) diff --git a/pkg/store/store_adapter.go b/pkg/store/store_adapter.go index 1dc46ddea1..79571c20f7 100644 --- a/pkg/store/store_adapter.go +++ b/pkg/store/store_adapter.go @@ -112,14 +112,10 @@ func (hs *heightSub) notifyUpTo(h uint64) { // are validated and persisted by the ev-node syncer. Once the ev-node syncer processes // a block, it writes to the underlying store, and subsequent reads will come from the store. type StoreAdapter[H header.Header[H]] struct { - getter StoreGetter[H] - initialHeight uint64 + getter StoreGetter[H] + genesisInitialHeight uint64 - // height caches the current height to avoid repeated context-based lookups. - // Updated on successful reads and writes. - height atomic.Uint64 - - // heightSub allows waiting for specific heights to be stored. + // heightSub tracks the current height and allows waiting for specific heights. // This is required by go-header syncer for blocking GetByHeight. 
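+	// It is now the single source of truth for the adapter's current height.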
heightSub *heightSub @@ -141,22 +137,21 @@ func NewStoreAdapter[H header.Header[H]](getter StoreGetter[H], gen genesis.Gene // Create LRU cache for pending items - ignore error as size is constant and valid pendingCache, _ := lru.New[uint64, H](defaultPendingCacheSize) - // Get initial height from store - initialHeight := gen.InitialHeight - if h, err := getter.Height(context.Background()); err == nil && h > 0 { - initialHeight = h + // Get actual current height from store (0 if empty) + var storeHeight uint64 + if h, err := getter.Height(context.Background()); err == nil { + storeHeight = h } adapter := &StoreAdapter[H]{ - getter: getter, - initialHeight: initialHeight, - pending: pendingCache, - heightSub: newHeightSub(initialHeight), + getter: getter, + genesisInitialHeight: max(gen.InitialHeight, 1), + pending: pendingCache, + heightSub: newHeightSub(storeHeight), } - // Initialize height from store - if initialHeight > 0 { - adapter.height.Store(initialHeight) + // Mark as initialized if we have data + if storeHeight > 0 { adapter.initialized = true } @@ -175,7 +170,6 @@ func (a *StoreAdapter[H]) Start(ctx context.Context) error { } if h > 0 { - a.height.Store(h) a.heightSub.SetHeight(h) a.initialized = true } @@ -212,13 +206,13 @@ func (a *StoreAdapter[H]) Head(ctx context.Context, _ ...header.HeadOption[H]) ( // Prefer pending if it's higher than store if pendingHeight > storeHeight { - a.height.Store(pendingHeight) + a.heightSub.SetHeight(pendingHeight) return pendingHead, nil } // Try to get from store if storeHeight > 0 { - a.height.Store(storeHeight) + a.heightSub.SetHeight(storeHeight) if item, err := a.getter.GetByHeight(ctx, storeHeight); err == nil { return item, nil } @@ -226,7 +220,7 @@ func (a *StoreAdapter[H]) Head(ctx context.Context, _ ...header.HeadOption[H]) ( // Fall back to pending if store failed if pendingHeight > 0 { - a.height.Store(pendingHeight) + a.heightSub.SetHeight(pendingHeight) return pendingHead, nil } @@ -240,7 +234,7 @@ func (a *StoreAdapter[H]) Head(ctx context.Context, _ ...header.HeadOption[H]) ( func (a *StoreAdapter[H]) Tail(ctx context.Context) (H, error) { var zero H - height := a.height.Load() + height := a.heightSub.Height() if height == 0 { // Check store h, err := a.getter.Height(ctx) @@ -250,19 +244,19 @@ func (a *StoreAdapter[H]) Tail(ctx context.Context) (H, error) { height = h } - // Try initialHeight first (most common case - no pruning) - item, err := a.getter.GetByHeight(ctx, a.initialHeight) + // Try genesisInitialHeight first (most common case - no pruning) + item, err := a.getter.GetByHeight(ctx, a.genesisInitialHeight) if err == nil { return item, nil } - // Check pending for initialHeight - if pendingItem, ok := a.pending.Peek(a.initialHeight); ok { + // Check pending for genesisInitialHeight + if pendingItem, ok := a.pending.Peek(a.genesisInitialHeight); ok { return pendingItem, nil } - // Walk up from initialHeight to find the first available item (pruning case) - for h := a.initialHeight + 1; h <= height; h++ { + // Walk up from genesisInitialHeight to find the first available item (pruning case) + for h := a.genesisInitialHeight + 1; h <= height; h++ { item, err = a.getter.GetByHeight(ctx, h) if err == nil { return item, nil @@ -413,15 +407,15 @@ func (a *StoreAdapter[H]) Height() uint64 { } if maxPending > h { - a.height.Store(maxPending) + a.heightSub.SetHeight(maxPending) return maxPending } - a.height.Store(h) + a.heightSub.SetHeight(h) return h } // Fall back to cached height or check pending - height := 
a.height.Load()
+	height := a.heightSub.Height()
 	if height > 0 {
 		return height
 	}
@@ -459,8 +453,7 @@ func (a *StoreAdapter[H]) Append(ctx context.Context, items ...H) error {
 		a.pending.Add(height, item)
 
 		// Update cached height and notify waiters
-		if height > a.height.Load() {
-			a.height.Store(height)
+		if height > a.heightSub.Height() {
 			a.heightSub.SetHeight(height)
 		}
 	}
@@ -484,7 +477,6 @@ func (a *StoreAdapter[H]) Init(ctx context.Context, item H) error {
 
 	// Add to pending cache (LRU will evict oldest if full)
 	a.pending.Add(item.Height(), item)
-	a.height.Store(item.Height())
 	a.heightSub.SetHeight(item.Height())
 	a.initialized = true
 
@@ -512,8 +504,8 @@ func (a *StoreAdapter[H]) DeleteRange(ctx context.Context, from, to uint64) erro
 	}
 
 	// Update cached height if necessary
-	if from <= a.height.Load() {
-		a.height.Store(from - 1)
+	if from <= a.heightSub.Height() {
+		a.heightSub.SetHeight(from - 1)
 	}
 
 	return nil

From 0124c2512c8057bda31df6132eb0a475c26a130b Mon Sep 17 00:00:00 2001
From: Julien Robert
Date: Sun, 1 Feb 2026 20:51:03 +0100
Subject: [PATCH 14/21] remove unnecessary go-header store write

---
 block/internal/syncing/syncer.go | 20 --------------------
 1 file changed, 20 deletions(-)

diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go
index a5e49f5a67..a3f999a056 100644
--- a/block/internal/syncing/syncer.go
+++ b/block/internal/syncing/syncer.go
@@ -17,9 +17,7 @@ import (
 	coreexecutor "github.com/evstack/ev-node/core/execution"
 	datypes "github.com/evstack/ev-node/pkg/da/types"
 	"github.com/evstack/ev-node/pkg/raft"
-	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	"github.com/rs/zerolog"
-	"golang.org/x/sync/errgroup"
 
 	"github.com/evstack/ev-node/block/internal/cache"
 	"github.com/evstack/ev-node/block/internal/common"
@@ -628,24 +626,6 @@ func (s *Syncer) processHeightEvent(ctx context.Context, event *common.DAHeightE
 		}
 		return
 	}
-
-	// only save to p2p stores if the event came from DA
-	if event.Source == common.SourceDA { // TODO(@julienrbrt): To be reverted once DA Hints are merged (https://github.com/evstack/ev-node/pull/2891)
-		g, ctx := errgroup.WithContext(ctx)
-		g.Go(func() error {
-			// broadcast header locally only — prevents spamming the p2p network with old height notifications,
-			// allowing the syncer to update its target and fill missing blocks
-			return s.headerStore.WriteToStoreAndBroadcast(ctx, event.Header, pubsub.WithLocalPublication(true))
-		})
-		g.Go(func() error {
-			// broadcast data locally only — prevents spamming the p2p network with old height notifications,
-			// allowing the syncer to update its target and fill missing blocks
-			return s.dataStore.WriteToStoreAndBroadcast(ctx, event.Data, pubsub.WithLocalPublication(true))
-		})
-		if err := g.Wait(); err != nil {
-			s.logger.Error().Err(err).Msg("failed to append event header and/or data to p2p store")
-		}
-	}
 }
 
 var (

From 9101f11abf5d131cd93f528e667209e1989cf2cb Mon Sep 17 00:00:00 2001
From: Julien Robert
Date: Sun, 1 Feb 2026 23:01:45 +0100
Subject: [PATCH 15/21] update test based on new behavior

---
 test/e2e/evm_full_node_e2e_test.go | 40 ++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/test/e2e/evm_full_node_e2e_test.go b/test/e2e/evm_full_node_e2e_test.go
index fc528b1778..45fe627dd8 100644
--- a/test/e2e/evm_full_node_e2e_test.go
+++ b/test/e2e/evm_full_node_e2e_test.go
@@ -1222,6 +1222,20 @@ func testSequencerFullNodeRestart(t *testing.T, initialLazyMode, restartLazyMode
 
 	t.Log("Phase 4: Verifying blockchain state preservation after restart...")
 
+	// 
After restart, the full node needs to re-fetch blocks via P2P from the sequencer. + // Wait for the full node to sync up to the pre-restart height before verifying state preservation. + t.Log("Waiting for full node to re-sync blocks via P2P after restart...") + require.Eventually(t, func() bool { + fnHeader, fnErr := fullNodeClient.HeaderByNumber(ctx, nil) + if fnErr != nil { + return false + } + fnHeight := fnHeader.Number.Uint64() + t.Logf("Full node re-sync progress: current=%d, target=%d", fnHeight, preRestartFnHeight) + return fnHeight >= preRestartFnHeight + }, 60*time.Second, 1*time.Second, "Full node should re-sync to pre-restart height via P2P") + t.Log("Full node re-synced to pre-restart height") + postRestartSeqHeader, err := sequencerClient.HeaderByNumber(ctx, nil) require.NoError(t, err, "Should get sequencer header after restart") postRestartFnHeader, err := fullNodeClient.HeaderByNumber(ctx, nil) @@ -1270,6 +1284,31 @@ func testSequencerFullNodeRestart(t *testing.T, initialLazyMode, restartLazyMode t.Log("Phase 5: Verifying post-restart functionality and P2P sync...") + // After restart, the full node needs to re-fetch blocks via P2P from the sequencer. + // Wait for the full node to fully sync with the sequencer before submitting new transactions. + t.Log("Waiting for full node to fully sync with sequencer after restart...") + require.Eventually(t, func() bool { + seqHeader, seqErr := sequencerClient.HeaderByNumber(ctx, nil) + fnHeader, fnErr := fullNodeClient.HeaderByNumber(ctx, nil) + + if seqErr != nil || fnErr != nil { + return false + } + + seqHeight := seqHeader.Number.Uint64() + fnHeight := fnHeader.Number.Uint64() + + // Full node should be within 2 blocks of sequencer to be considered fully synced + heightDiff := int64(seqHeight) - int64(fnHeight) + if heightDiff < 0 { + heightDiff = -heightDiff + } + + t.Logf("Sync progress: sequencer=%d, full_node=%d, diff=%d", seqHeight, fnHeight, heightDiff) + return heightDiff <= 2 + }, 60*time.Second, 1*time.Second, "Full node should fully sync with sequencer after restart") + t.Log("Full node fully synced with sequencer") + // Submit new transactions after restart to verify functionality const numPostRestartTxs = 3 var postRestartTxHashes []common.Hash @@ -1283,6 +1322,7 @@ func testSequencerFullNodeRestart(t *testing.T, initialLazyMode, restartLazyMode t.Logf("Post-restart transaction %d included in sequencer block %d", i+1, txBlockNumber) // Verify transaction syncs to full node (testing P2P sync functionality) + // Use longer timeout after restart since P2P sync pipeline may need time to stabilize verifyTransactionSync(t, sequencerClient, fullNodeClient, txHash, txBlockNumber) t.Logf("✅ Post-restart transaction %d synced to full node via P2P", i+1) From 0588001effa29c22f7eb79a618aca2312d3ae2e4 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 2 Feb 2026 11:29:53 +0100 Subject: [PATCH 16/21] kiss --- test/e2e/evm_full_node_e2e_test.go | 35 +++--------------------------- 1 file changed, 3 insertions(+), 32 deletions(-) diff --git a/test/e2e/evm_full_node_e2e_test.go b/test/e2e/evm_full_node_e2e_test.go index 45fe627dd8..9e302959d2 100644 --- a/test/e2e/evm_full_node_e2e_test.go +++ b/test/e2e/evm_full_node_e2e_test.go @@ -146,7 +146,7 @@ func verifyTransactionSync(t *testing.T, sequencerClient, fullNodeClient *ethcli } } return false - }, 45*time.Second, 1*time.Second, "Full node should sync the block containing the transaction") + }, 60*time.Second, 500*time.Millisecond, "Full node should sync the block containing 
the transaction") // Final verification - both nodes should have the transaction in the same block sequencerReceipt, err := sequencerClient.TransactionReceipt(ctx, txHash) @@ -341,7 +341,6 @@ func TestEvmSequencerWithFullNodeE2E(t *testing.T) { // Wait for all transactions to be processed time.Sleep(500 * time.Millisecond) - t.Logf("Total transactions submitted: %d across blocks %v", len(txHashes), txBlockNumbers) t.Log("Waiting for full node to sync all transaction blocks...") @@ -1233,7 +1232,7 @@ func testSequencerFullNodeRestart(t *testing.T, initialLazyMode, restartLazyMode fnHeight := fnHeader.Number.Uint64() t.Logf("Full node re-sync progress: current=%d, target=%d", fnHeight, preRestartFnHeight) return fnHeight >= preRestartFnHeight - }, 60*time.Second, 1*time.Second, "Full node should re-sync to pre-restart height via P2P") + }, DefaultTestTimeout, 500*time.Millisecond, "Full node should re-sync to pre-restart height via P2P") t.Log("Full node re-synced to pre-restart height") postRestartSeqHeader, err := sequencerClient.HeaderByNumber(ctx, nil) @@ -1284,31 +1283,6 @@ func testSequencerFullNodeRestart(t *testing.T, initialLazyMode, restartLazyMode t.Log("Phase 5: Verifying post-restart functionality and P2P sync...") - // After restart, the full node needs to re-fetch blocks via P2P from the sequencer. - // Wait for the full node to fully sync with the sequencer before submitting new transactions. - t.Log("Waiting for full node to fully sync with sequencer after restart...") - require.Eventually(t, func() bool { - seqHeader, seqErr := sequencerClient.HeaderByNumber(ctx, nil) - fnHeader, fnErr := fullNodeClient.HeaderByNumber(ctx, nil) - - if seqErr != nil || fnErr != nil { - return false - } - - seqHeight := seqHeader.Number.Uint64() - fnHeight := fnHeader.Number.Uint64() - - // Full node should be within 2 blocks of sequencer to be considered fully synced - heightDiff := int64(seqHeight) - int64(fnHeight) - if heightDiff < 0 { - heightDiff = -heightDiff - } - - t.Logf("Sync progress: sequencer=%d, full_node=%d, diff=%d", seqHeight, fnHeight, heightDiff) - return heightDiff <= 2 - }, 60*time.Second, 1*time.Second, "Full node should fully sync with sequencer after restart") - t.Log("Full node fully synced with sequencer") - // Submit new transactions after restart to verify functionality const numPostRestartTxs = 3 var postRestartTxHashes []common.Hash @@ -1322,11 +1296,8 @@ func testSequencerFullNodeRestart(t *testing.T, initialLazyMode, restartLazyMode t.Logf("Post-restart transaction %d included in sequencer block %d", i+1, txBlockNumber) // Verify transaction syncs to full node (testing P2P sync functionality) - // Use longer timeout after restart since P2P sync pipeline may need time to stabilize verifyTransactionSync(t, sequencerClient, fullNodeClient, txHash, txBlockNumber) - t.Logf("✅ Post-restart transaction %d synced to full node via P2P", i+1) - - time.Sleep(5 * time.Millisecond) + t.Logf("✅Post-restart transaction %d synced to full node via P2P", i+1) } // === LAZY MODE POST-TRANSACTION VERIFICATION (if applicable) === From 0cc54d011f539a2d522f3f69ad5ff01eccdce43e Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 2 Feb 2026 14:08:23 +0100 Subject: [PATCH 17/21] update sync service --- pkg/sync/sync_service.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/sync/sync_service.go b/pkg/sync/sync_service.go index d4d4d79889..6d75bd73e2 100644 --- a/pkg/sync/sync_service.go +++ b/pkg/sync/sync_service.go @@ -32,9 +32,6 @@ const ( dataSync 
syncType = "dataSync" ) -// TODO: when we add pruning we can remove this -const ninetyNineYears = 99 * 365 * 24 * time.Hour - // SyncService is the P2P Sync Service for blocks and headers. // // Uses the go-header library for handling all P2P logic. @@ -448,9 +445,12 @@ func newSyncer[H header.Header[H]]( sub header.Subscriber[H], opts []goheadersync.Option, ) (*goheadersync.Syncer[H], error) { + // using a very long duration for effectively disabling pruning and trusting period checks. + const ninetyNineYears = 99 * 365 * 24 * time.Hour + opts = append(opts, goheadersync.WithMetrics(), - goheadersync.WithPruningWindow(ninetyNineYears), + goheadersync.WithPruningWindow(ninetyNineYears), // pruning window not relevant, because of the store wrapper. goheadersync.WithTrustingPeriod(ninetyNineYears), ) return goheadersync.NewSyncer(ex, store, sub, opts...) From 08d8594a5ad94d5ae2d2a8498f95c5276223bd7c Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 2 Feb 2026 14:15:00 +0100 Subject: [PATCH 18/21] [DNM] feat: delete go-header store cmd --- apps/evm/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/apps/evm/main.go b/apps/evm/main.go index 04168e4fc1..647aa0bc9d 100644 --- a/apps/evm/main.go +++ b/apps/evm/main.go @@ -28,6 +28,7 @@ func main() { cmd.InitCmd(), cmd.RunCmd, cmd.NewRollbackCmd(), + cmd.NewCleanupGoHeaderCmd(), rollcmd.VersionCmd, rollcmd.NetInfoCmd, rollcmd.StoreUnsafeCleanCmd, From f1460beac625b07e708536e6ed52464eff6fbab3 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Mon, 2 Feb 2026 14:15:13 +0100 Subject: [PATCH 19/21] updates --- apps/evm/cmd/cleanup_goheader.go | 134 +++++++++++++++++++++++++++++++ 1 file changed, 134 insertions(+) create mode 100644 apps/evm/cmd/cleanup_goheader.go diff --git a/apps/evm/cmd/cleanup_goheader.go b/apps/evm/cmd/cleanup_goheader.go new file mode 100644 index 0000000000..b5c75e6c97 --- /dev/null +++ b/apps/evm/cmd/cleanup_goheader.go @@ -0,0 +1,134 @@ +package cmd + +import ( + "context" + "fmt" + + ds "github.com/ipfs/go-datastore" + dsq "github.com/ipfs/go-datastore/query" + "github.com/spf13/cobra" + + rollcmd "github.com/evstack/ev-node/pkg/cmd" + "github.com/evstack/ev-node/pkg/store" +) + +const ( + // go-header store prefixes used prior to the unified store migration + headerSyncPrefix = "/headerSync" + dataSyncPrefix = "/dataSync" +) + +// NewCleanupGoHeaderCmd creates a command to delete the legacy go-header store data. +// This data is no longer needed after the migration to the unified store approach +// where HeaderStoreAdapter and DataStoreAdapter read directly from the ev-node store. +func NewCleanupGoHeaderCmd() *cobra.Command { + var dryRun bool + + cmd := &cobra.Command{ + Use: "cleanup-goheader", + Short: "Delete legacy go-header store data from disk", + Long: `Delete the legacy go-header store data (headerSync and dataSync prefixes) from the database. + +This command removes data that was previously duplicated by the go-header library +for P2P sync operations. After the migration to the unified store approach, +this data is no longer needed as the HeaderStoreAdapter and DataStoreAdapter +now read directly from the ev-node store. + +WARNING: Make sure the node is stopped before running this command. 
+This operation is irreversible.`, + RunE: func(cmd *cobra.Command, args []string) error { + nodeConfig, err := rollcmd.ParseConfig(cmd) + if err != nil { + return err + } + + goCtx := cmd.Context() + if goCtx == nil { + goCtx = context.Background() + } + + // Open the database + rawDB, err := store.NewDefaultKVStore(nodeConfig.RootDir, nodeConfig.DBPath, evmDbName) + if err != nil { + return fmt.Errorf("failed to open database: %w", err) + } + defer func() { + if closeErr := rawDB.Close(); closeErr != nil { + cmd.Printf("Warning: failed to close database: %v\n", closeErr) + } + }() + + // Delete headerSync prefix + headerCount, err := deletePrefix(goCtx, rawDB, headerSyncPrefix, dryRun) + if err != nil { + return fmt.Errorf("failed to delete headerSync data: %w", err) + } + + // Delete dataSync prefix + dataCount, err := deletePrefix(goCtx, rawDB, dataSyncPrefix, dryRun) + if err != nil { + return fmt.Errorf("failed to delete dataSync data: %w", err) + } + + totalCount := headerCount + dataCount + + if dryRun { + cmd.Printf("Dry run: would delete %d keys (%d headerSync, %d dataSync)\n", + totalCount, headerCount, dataCount) + } else { + if totalCount == 0 { + cmd.Println("No legacy go-header store data found to delete.") + } else { + cmd.Printf("Successfully deleted %d keys (%d headerSync, %d dataSync)\n", + totalCount, headerCount, dataCount) + } + } + + return nil + }, + } + + cmd.Flags().BoolVar(&dryRun, "dry-run", false, "show what would be deleted without actually deleting") + + return cmd +} + +// deletePrefix deletes all keys with the given prefix from the datastore. +// Returns the number of keys deleted. +func deletePrefix(ctx context.Context, db ds.Batching, prefix string, dryRun bool) (int, error) { + results, err := db.Query(ctx, dsq.Query{ + Prefix: prefix, + KeysOnly: true, + }) + if err != nil { + return 0, fmt.Errorf("failed to query keys with prefix %s: %w", prefix, err) + } + defer results.Close() + + count := 0 + batch, err := db.Batch(ctx) + if err != nil { + return 0, fmt.Errorf("failed to create batch: %w", err) + } + + for result := range results.Next() { + if result.Error != nil { + return count, fmt.Errorf("error iterating results: %w", result.Error) + } + + if !dryRun { + if err := batch.Delete(ctx, ds.NewKey(result.Key)); err != nil { + return count, fmt.Errorf("failed to delete key %s: %w", result.Key, err) + } + } + count++ + } + + if !dryRun && count > 0 { + if err := batch.Commit(ctx); err != nil { + return count, fmt.Errorf("failed to commit batch delete: %w", err) + } + } + + return count, nil +} From 2eca3d1e0213c6c0da8d0656d1d78ac6c4e61a69 Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 6 Feb 2026 11:39:58 +0100 Subject: [PATCH 20/21] extract into standalone --- apps/evm/main.go | 1 - .../db-cleaner}/cleanup_goheader.go | 27 +- tools/db-cleaner/go.mod | 183 +++++ tools/db-cleaner/go.sum | 760 ++++++++++++++++++ 4 files changed, 968 insertions(+), 3 deletions(-) rename {apps/evm/cmd => tools/db-cleaner}/cleanup_goheader.go (89%) create mode 100644 tools/db-cleaner/go.mod create mode 100644 tools/db-cleaner/go.sum diff --git a/apps/evm/main.go b/apps/evm/main.go index 647aa0bc9d..04168e4fc1 100644 --- a/apps/evm/main.go +++ b/apps/evm/main.go @@ -28,7 +28,6 @@ func main() { cmd.InitCmd(), cmd.RunCmd, cmd.NewRollbackCmd(), - cmd.NewCleanupGoHeaderCmd(), rollcmd.VersionCmd, rollcmd.NetInfoCmd, rollcmd.StoreUnsafeCleanCmd, diff --git a/apps/evm/cmd/cleanup_goheader.go b/tools/db-cleaner/cleanup_goheader.go similarity index 89% rename from 
apps/evm/cmd/cleanup_goheader.go rename to tools/db-cleaner/cleanup_goheader.go index b5c75e6c97..af17c5128f 100644 --- a/apps/evm/cmd/cleanup_goheader.go +++ b/tools/db-cleaner/cleanup_goheader.go @@ -1,23 +1,46 @@ -package cmd +package main import ( "context" "fmt" + "os" ds "github.com/ipfs/go-datastore" dsq "github.com/ipfs/go-datastore/query" "github.com/spf13/cobra" rollcmd "github.com/evstack/ev-node/pkg/cmd" + "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/store" ) +const evmDbName = "evm-single" + const ( // go-header store prefixes used prior to the unified store migration headerSyncPrefix = "/headerSync" dataSyncPrefix = "/dataSync" ) +func main() { + // Initiate the root command + rootCmd := &cobra.Command{ + Use: "db-cleaner", + } + + config.AddGlobalFlags(rootCmd, "evm") + + rootCmd.AddCommand( + NewCleanupGoHeaderCmd(), + ) + + if err := rootCmd.Execute(); err != nil { + // Print to stderr and exit with error + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + // NewCleanupGoHeaderCmd creates a command to delete the legacy go-header store data. // This data is no longer needed after the migration to the unified store approach // where HeaderStoreAdapter and DataStoreAdapter read directly from the ev-node store. @@ -25,7 +48,7 @@ func NewCleanupGoHeaderCmd() *cobra.Command { var dryRun bool cmd := &cobra.Command{ - Use: "cleanup-goheader", + Use: "clean-evm", Short: "Delete legacy go-header store data from disk", Long: `Delete the legacy go-header store data (headerSync and dataSync prefixes) from the database. diff --git a/tools/db-cleaner/go.mod b/tools/db-cleaner/go.mod new file mode 100644 index 0000000000..8bd830ad57 --- /dev/null +++ b/tools/db-cleaner/go.mod @@ -0,0 +1,183 @@ +module github.com/evstack/ev-node/tools/db-cleaner + +go 1.25.6 + +require ( + github.com/evstack/ev-node v1.0.0-rc.3 + github.com/ipfs/go-datastore v0.9.0 + github.com/spf13/cobra v1.10.2 +) + +require ( + connectrpc.com/connect v1.19.1 // indirect + connectrpc.com/grpcreflect v1.3.0 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/boltdb/bolt v1.3.1 // indirect + github.com/celestiaorg/go-header v0.8.1 // indirect + github.com/celestiaorg/go-libp2p-messenger v0.2.2 // indirect + github.com/celestiaorg/go-square/merkle v0.0.0-20240627094109-7d01436067a3 // indirect + github.com/celestiaorg/go-square/v3 v3.0.2 // indirect + github.com/celestiaorg/nmt v0.24.2 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/dgraph-io/badger/v4 v4.5.1 // indirect + github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect + github.com/dunglas/httpsfv v1.1.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/evstack/ev-node/core v1.0.0-rc.1 // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/filecoin-project/go-clock v0.1.0 // indirect + github.com/filecoin-project/go-jsonrpc v0.10.0 // indirect + github.com/flynn/noise v1.1.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/go-kit/kit v0.13.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + 
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/goccy/go-yaml v1.19.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/google/flatbuffers v25.1.24+incompatible // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.3 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect + github.com/hashicorp/go-hclog v1.6.2 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-metrics v0.5.4 // indirect + github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/raft v1.7.3 // indirect + github.com/hashicorp/raft-boltdb v0.0.0-20251103221153-05f9dd7a5148 // indirect + github.com/huin/goupnp v1.3.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ipfs/boxo v0.35.2 // indirect + github.com/ipfs/go-cid v0.6.0 // indirect + github.com/ipfs/go-ds-badger4 v0.1.8 // indirect + github.com/ipfs/go-log/v2 v2.9.1 // indirect + github.com/ipld/go-ipld-prime v0.21.0 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/koron/go-ssdp v0.0.6 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-flow-metrics v0.3.0 // indirect + github.com/libp2p/go-libp2p v0.47.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.37.1 // indirect + github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect + github.com/libp2p/go-libp2p-pubsub v0.15.0 // indirect + github.com/libp2p/go-libp2p-record v0.3.1 // indirect + github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect + github.com/libp2p/go-msgio v0.3.0 // indirect + github.com/libp2p/go-netroute v0.4.0 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect + github.com/libp2p/go-yamux/v5 v5.0.1 // indirect + github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/miekg/dns v1.1.68 // indirect + github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect + github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multiaddr v0.16.1 // indirect + github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.10.0 // indirect + github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/multiformats/go-multistream v0.6.1 // indirect + github.com/multiformats/go-varint v0.1.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // 
indirect + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/pion/datachannel v1.5.10 // indirect + github.com/pion/dtls/v2 v2.2.12 // indirect + github.com/pion/dtls/v3 v3.0.6 // indirect + github.com/pion/ice/v4 v4.0.10 // indirect + github.com/pion/interceptor v0.1.40 // indirect + github.com/pion/logging v0.2.3 // indirect + github.com/pion/mdns/v2 v2.0.7 // indirect + github.com/pion/randutil v0.1.0 // indirect + github.com/pion/rtcp v1.2.15 // indirect + github.com/pion/rtp v1.8.19 // indirect + github.com/pion/sctp v1.8.39 // indirect + github.com/pion/sdp/v3 v3.0.13 // indirect + github.com/pion/srtp/v3 v3.0.6 // indirect + github.com/pion/stun v0.6.1 // indirect + github.com/pion/stun/v3 v3.0.0 // indirect + github.com/pion/transport/v2 v2.2.10 // indirect + github.com/pion/transport/v3 v3.0.7 // indirect + github.com/pion/turn/v4 v4.0.2 // indirect + github.com/pion/webrtc/v4 v4.1.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/polydawn/refmt v0.89.0 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.17.0 // indirect + github.com/quic-go/qpack v0.6.0 // indirect + github.com/quic-go/quic-go v0.59.0 // indirect + github.com/quic-go/webtransport-go v0.10.0 // indirect + github.com/rs/zerolog v1.34.0 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/spf13/viper v1.21.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/stretchr/testify v1.11.1 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect + github.com/wlynxg/anet v0.0.5 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/sdk v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect + go.opentelemetry.io/proto/otlp v1.9.0 // indirect + go.uber.org/dig v1.19.0 // indirect + go.uber.org/fx v1.24.0 // indirect + go.uber.org/mock v0.5.2 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.1 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.47.0 // indirect + golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect + golang.org/x/mod v0.32.0 // indirect + golang.org/x/net v0.49.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 // indirect + golang.org/x/text v0.33.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.41.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect + gonum.org/v1/gonum 
v0.17.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect + google.golang.org/grpc v1.78.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + lukechampine.com/blake3 v1.4.1 // indirect +) diff --git a/tools/db-cleaner/go.sum b/tools/db-cleaner/go.sum new file mode 100644 index 0000000000..c9ca7cc860 --- /dev/null +++ b/tools/db-cleaner/go.sum @@ -0,0 +1,760 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14= +connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w= +connectrpc.com/grpcreflect v1.3.0 h1:Y4V+ACf8/vOb1XOc251Qun7jMB75gCUNw6llvB9csXc= +connectrpc.com/grpcreflect v1.3.0/go.mod h1:nfloOtCS8VUQOQ1+GTdFzVg2CJo4ZGaat8JIovCtDYs= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Sereal/Sereal/Go/sereal v0.0.0-20231009093132-b9187f1a92c6/go.mod h1:JwrycNnC8+sZPDyzM3MQ86LvaGzSpfxg885KOOwFRW4= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/celestiaorg/go-header v0.8.1 h1:+DKM6y2zkY8rHMyyl1huUDi/5dy9KTUCnP+GKvdL5Jg= +github.com/celestiaorg/go-header v0.8.1/go.mod h1:X00prITrMa2kxgEX15WQnbLf0uV6tlvTesDKC5KsDVQ= +github.com/celestiaorg/go-libp2p-messenger v0.2.2 h1:osoUfqjss7vWTIZrrDSy953RjQz+ps/vBFE7bychLEc= +github.com/celestiaorg/go-libp2p-messenger v0.2.2/go.mod h1:oTCRV5TfdO7V/k6nkx7QjQzGrWuJbupv+0o1cgnY2i4= 
+github.com/celestiaorg/go-square/merkle v0.0.0-20240627094109-7d01436067a3 h1:wP84mtwOCVNOTfS3zErICjxKLnh74Z1uf+tdrlSFjVM= +github.com/celestiaorg/go-square/merkle v0.0.0-20240627094109-7d01436067a3/go.mod h1:86qIYnEhmn/hfW+xvw98NOI3zGaDEB3x8JGjYo2FqLs= +github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= +github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/nmt v0.24.2 h1:LlpJSPOd6/Lw1Ig6HUhZuqiINHLka/ZSRTBzlNJpchg= +github.com/celestiaorg/nmt v0.24.2/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= +github.com/celestiaorg/utils v0.1.0 h1:WsP3O8jF7jKRgLNFmlDCwdThwOFMFxg0MnqhkLFVxPo= +github.com/celestiaorg/utils v0.1.0/go.mod h1:vQTh7MHnvpIeCQZ2/Ph+w7K1R2UerDheZbgJEJD2hSU= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/dchest/siphash v1.2.3/go.mod h1:0NvQU092bT0ipiFN++/rXm69QG9tVxLAlQHIXMPAkHc= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/dgraph-io/badger/v4 v4.5.1 h1:7DCIXrQjo1LKmM96YD+hLVJ2EEsyyoWxJfpdd56HLps= +github.com/dgraph-io/badger/v4 v4.5.1/go.mod h1:qn3Be0j3TfV4kPbVoK0arXCD1/nr1ftth6sbL5jxdoA= 
+github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I= +github.com/dgraph-io/ristretto/v2 v2.1.0/go.mod h1:uejeqfYXpUomfse0+lO+13ATz4TypQYLJZzBSAemuB4= +github.com/dgryski/go-ddmin v0.0.0-20210904190556-96a6d69f1034/go.mod h1:zz4KxBkcXUWKjIcrc+uphJ1gPh/t18ymGm3PmQ+VGTk= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dunglas/httpsfv v1.1.0 h1:Jw76nAyKWKZKFrpMMcL76y35tOpYHqQPzHQiwDvpe54= +github.com/dunglas/httpsfv v1.1.0/go.mod h1:zID2mqw9mFsnt7YC3vYQ9/cjq30q41W+1AnDwH8TiMg= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evstack/ev-node v1.0.0-rc.3 h1:hphJBI0b1TgGN9wajB1twouMVMjhyHXXrS9QaG1XwvQ= +github.com/evstack/ev-node v1.0.0-rc.3/go.mod h1:5Cf3SauhgIV+seQKBJavv3f8ZZw+YTnH5DRJcI4Ooj0= +github.com/evstack/ev-node/core v1.0.0-rc.1 h1:Dic2PMUMAYUl5JW6DkDj6HXDEWYzorVJQuuUJOV0FjE= +github.com/evstack/ev-node/core v1.0.0-rc.1/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= +github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= +github.com/filecoin-project/go-jsonrpc v0.10.0 h1:gZc1thGVD5Khg5Gp1UJibRWZrnNBEP1iFrGOTn0w5TE= +github.com/filecoin-project/go-jsonrpc v0.10.0/go.mod h1:OG7kVBVh/AbDFHIwx7Kw0l9ARmKOS6gGOr0LbdBpbLc= +github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= +github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= +github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod 
h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= +github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM= +github.com/goccy/go-yaml v1.19.2/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/flatbuffers v25.1.24+incompatible h1:4wPqL3K7GzBd1CwyhSd3usxLKOaJN/AC6puCca6Jm7o= 
+github.com/google/flatbuffers v25.1.24+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= +github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= +github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= 
+github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/raft v1.7.3 h1:DxpEqZJysHN0wK+fviai5mFcSYsCkNpFUl1xpAW8Rbo= +github.com/hashicorp/raft v1.7.3/go.mod h1:DfvCGFxpAUPE0L4Uc8JLlTPtc3GzSbdH0MTJCLgnmJQ= +github.com/hashicorp/raft-boltdb v0.0.0-20251103221153-05f9dd7a5148 h1:tjaIHlfKX22DCCPTx2mK+6N/kTP9DV7B3bxEUyQtjKA= +github.com/hashicorp/raft-boltdb v0.0.0-20251103221153-05f9dd7a5148/go.mod h1:sgCxzMuvQ3huVxgmeDdj73YIMmezWZ40HQu2IPmjJWk= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ipfs/boxo v0.35.2 h1:0QZJJh6qrak28abENOi5OA8NjBnZM4p52SxeuIDqNf8= +github.com/ipfs/boxo v0.35.2/go.mod h1:bZn02OFWwJtY8dDW9XLHaki59EC5o+TGDECXEbe1w8U= +github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk= +github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA= +github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30= +github.com/ipfs/go-cid v0.6.0/go.mod h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ= +github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w= +github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger4 v0.1.8 h1:frNczf5CjCVm62RJ5mW5tD/oLQY/9IKAUpKviRV9QAI= +github.com/ipfs/go-ds-badger4 v0.1.8/go.mod h1:FdqSLA5TMsyqooENB/Hf4xzYE/iH0z/ErLD6ogtfMrA= +github.com/ipfs/go-log/v2 v2.9.1 h1:3JXwHWU31dsCpvQ+7asz6/QsFJHqFr4gLgQ0FWteujk= +github.com/ipfs/go-log/v2 v2.9.1/go.mod h1:evFx7sBiohUN3AG12mXlZBw5hacBQld3ZPHrowlJYoo= +github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc= +github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o= +github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= +github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= 
+github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU= +github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= +github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= +github.com/libp2p/go-flow-metrics v0.3.0/go.mod 
h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= +github.com/libp2p/go-libp2p v0.47.0 h1:qQpBjSCWNQFF0hjBbKirMXE9RHLtSuzTDkTfr1rw0yc= +github.com/libp2p/go-libp2p v0.47.0/go.mod h1:s8HPh7mMV933OtXzONaGFseCg/BE//m1V34p3x4EUOY= +github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= +github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= +github.com/libp2p/go-libp2p-kad-dht v0.37.1 h1:jtX8bQIXVCs6/allskNB4m5n95Xvwav7wHAhopGZfS0= +github.com/libp2p/go-libp2p-kad-dht v0.37.1/go.mod h1:Uwokdh232k9Y1uMy2yJOK5zb7hpMHn4P8uWS4s9i05Q= +github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= +github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= +github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= +github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= +github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= +github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= +github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= +github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= +github.com/libp2p/go-netroute v0.4.0 h1:sZZx9hyANYUx9PZyqcgE/E1GUG3iEtTZHUEvdtXT7/Q= +github.com/libp2p/go-netroute v0.4.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= +github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg= +github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/marcopolo/simnet v0.0.4 h1:50Kx4hS9kFGSRIbrt9xUS3NJX33EyPqHVmpXvaKLqrY= +github.com/marcopolo/simnet v0.0.4/go.mod h1:tfQF1u2DmaB6WHODMtQaLtClEf3a296CKQLq5gAsIS0= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= +github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw= +github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= +github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= +github.com/multiformats/go-multiaddr-fmt v0.1.0 
h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.10.0 h1:UpP223cig/Cx8J76jWt91njpK3GTAO1w02sdcjZDSuc= +github.com/multiformats/go-multicodec v0.10.0/go.mod h1:wg88pM+s2kZJEQfRCKBNU+g32F5aWBEjyFHXvZLTcLI= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ= +github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw= +github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI= +github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= +github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= +github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= +github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E= +github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU= +github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4= +github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= +github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4= +github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI= +github.com/pion/logging v0.2.3/go.mod 
h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90= +github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= +github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= +github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= +github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= +github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= +github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c= +github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk= +github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE= +github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE= +github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4= +github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E= +github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4= +github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY= +github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= +github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= +github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= +github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= +github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= +github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps= +github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs= +github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54= +github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= +github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= +github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod 
h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= +github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= +github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw= +github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU= +github.com/quic-go/webtransport-go v0.10.0 h1:LqXXPOXuETY5Xe8ITdGisBzTYmUOy5eSj+9n4hLTjHI= +github.com/quic-go/webtransport-go v0.10.0/go.mod h1:LeGIXr5BQKE3UsynwVBeQrU1TPrbh73MGoC6jd+V7ow= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= +github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= +github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= 
+go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= +go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= +go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= +go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg= +go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= +go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 h1:O1cMQHRfwNpDfDJerqRoE2oD+AFlyid87D40L/OkkJo= +golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text 
v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= +gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M= +google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod 
h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/vmihailenco/msgpack.v2 v2.9.2/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn+AEGwNEOatn8= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= From 7472bd757992718b6889246e821236f05f05389e Mon Sep 17 00:00:00 2001 From: Julien Robert Date: Fri, 6 Feb 2026 12:04:08 +0100 Subject: [PATCH 21/21] correct prefix --- tools/db-cleaner/cleanup_goheader.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/tools/db-cleaner/cleanup_goheader.go b/tools/db-cleaner/cleanup_goheader.go index af17c5128f..7b54d9512a 100644 --- a/tools/db-cleaner/cleanup_goheader.go +++ b/tools/db-cleaner/cleanup_goheader.go @@ -6,7 +6,8 @@ import ( "os" ds "github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-datastore/query" + "github.com/ipfs/go-datastore/namespace" + "github.com/ipfs/go-datastore/query" "github.com/spf13/cobra" rollcmd "github.com/evstack/ev-node/pkg/cmd" @@ -81,14 +82,15 @@ This operation is irreversible.`, } }() + evDB := store.NewEvNodeKVStore(rawDB) // Delete headerSync prefix - headerCount, err := deletePrefix(goCtx, rawDB, headerSyncPrefix, dryRun) + headerCount, err := deletePrefix(goCtx, evDB, headerSyncPrefix, dryRun) if err != nil { return fmt.Errorf("failed to delete headerSync data: %w", err) } // Delete dataSync prefix - dataCount, err := deletePrefix(goCtx, rawDB, dataSyncPrefix, dryRun) + dataCount, err := deletePrefix(goCtx, evDB, dataSyncPrefix, dryRun) if err != nil { return fmt.Errorf("failed to delete dataSync data: %w", err) } @@ -119,17 +121,17 @@ This operation is irreversible.`, // deletePrefix deletes all keys with the given prefix from the datastore. // Returns the number of keys deleted. 
 func deletePrefix(ctx context.Context, db ds.Batching, prefix string, dryRun bool) (int, error) {
-	results, err := db.Query(ctx, dsq.Query{
-		Prefix:   prefix,
+	count := 0
+
+	pdb := namespace.Wrap(db, ds.NewKey(prefix))
+	results, err := pdb.Query(ctx, query.Query{
 		KeysOnly: true,
 	})
 	if err != nil {
 		return 0, fmt.Errorf("failed to query keys with prefix %s: %w", prefix, err)
 	}
-	defer results.Close()
 
-	count := 0
-	batch, err := db.Batch(ctx)
+	batch, err := pdb.Batch(ctx)
 	if err != nil {
 		return 0, fmt.Errorf("failed to create batch: %w", err)
 	}
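
A minimal, standalone sketch (separate from the patch above, and not part of the tool) of how namespace.Wrap from github.com/ipfs/go-datastore scopes a datastore to a key prefix, which is the behavior the corrected deletePrefix relies on: Query through the wrapped store only sees keys under the prefix and returns them relative to it, while Delete/Batch through the same wrapped store re-apply the prefix, so query results can be fed straight back into deletes without manual key manipulation. The package name, sample keys, and prefix values here are illustrative assumptions only.

package main

import (
	"context"
	"fmt"

	ds "github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	"github.com/ipfs/go-datastore/query"
)

func main() {
	ctx := context.Background()
	raw := ds.NewMapDatastore()
	defer raw.Close()

	// Seed keys under two prefixes; only /headerSync should be touched.
	_ = raw.Put(ctx, ds.NewKey("/headerSync/index/1"), []byte("a"))
	_ = raw.Put(ctx, ds.NewKey("/headerSync/index/2"), []byte("b"))
	_ = raw.Put(ctx, ds.NewKey("/dataSync/index/1"), []byte("c"))

	// Scope the store to the headerSync prefix.
	pdb := namespace.Wrap(raw, ds.NewKey("/headerSync"))

	// The wrapped store only returns keys under the prefix, stripped of it.
	results, err := pdb.Query(ctx, query.Query{KeysOnly: true})
	if err != nil {
		panic(err)
	}
	defer results.Close()

	for r := range results.Next() {
		if r.Error != nil {
			panic(r.Error)
		}
		fmt.Println("relative key:", r.Key) // e.g. /index/1
		// Deleting through pdb re-applies the prefix, so this removes
		// /headerSync/index/1 from the underlying datastore.
		if err := pdb.Delete(ctx, ds.NewKey(r.Key)); err != nil {
			panic(err)
		}
	}

	// Keys outside the prefix are untouched.
	ok, _ := raw.Has(ctx, ds.NewKey("/dataSync/index/1"))
	fmt.Println("dataSync key still present:", ok) // true
}

Because the wrapped store handles the prefix on both reads and writes, the tool can wrap once per prefix and use the same pdb for the query and the delete batch, which is what the hunk above does.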