diff --git a/blockdb/blockdb.go b/blockdb/blockdb.go index 3846def88..9217246dd 100644 --- a/blockdb/blockdb.go +++ b/blockdb/blockdb.go @@ -4,23 +4,25 @@ import ( "context" "fmt" + "github.com/sirupsen/logrus" + "github.com/ethpandaops/dora/blockdb/pebble" "github.com/ethpandaops/dora/blockdb/s3" + "github.com/ethpandaops/dora/blockdb/tiered" "github.com/ethpandaops/dora/blockdb/types" dtypes "github.com/ethpandaops/dora/types" ) -// BlockDb wraps the underlying storage engine for both beacon block data -// and execution data. +// BlockDb is the main wrapper for block database operations. type BlockDb struct { engine types.BlockDbEngine execEngine types.ExecDataEngine // nil if engine doesn't support exec data } -// GlobalBlockDb is the global singleton BlockDb instance. +// GlobalBlockDb is the global block database instance. var GlobalBlockDb *BlockDb -// InitWithPebble initializes the global BlockDb with a Pebble backend. +// InitWithPebble initializes the block database with Pebble (local) storage. func InitWithPebble(config dtypes.PebbleBlockDBConfig) error { engine, err := pebble.NewPebbleEngine(config) if err != nil { @@ -41,7 +43,7 @@ func InitWithPebble(config dtypes.PebbleBlockDBConfig) error { return nil } -// InitWithS3 initializes the global BlockDb with an S3 backend. +// InitWithS3 initializes the block database with S3 (remote) storage. func InitWithS3(config dtypes.S3BlockDBConfig) error { engine, err := s3.NewS3Engine(config) if err != nil { @@ -62,26 +64,85 @@ func InitWithS3(config dtypes.S3BlockDBConfig) error { return nil } +// InitWithTiered initializes the block database with tiered storage (Pebble cache + S3 backend). 
+func InitWithTiered(config dtypes.TieredBlockDBConfig, logger logrus.FieldLogger) error { + engine, err := tiered.NewTieredEngine(config, logger) + if err != nil { + return err + } + + db := &BlockDb{ + engine: engine, + } + + // Check if tiered engine supports exec data + if execEngine, ok := engine.(types.ExecDataEngine); ok { + db.execEngine = execEngine + } + + GlobalBlockDb = db + + return nil +} + +// Close closes the block database. func (db *BlockDb) Close() error { return db.engine.Close() } -func (db *BlockDb) GetBlock(ctx context.Context, slot uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*types.BlockData, error) { - return db.engine.GetBlock(ctx, slot, root, parseBlock) +// GetBlock retrieves block data with selective loading based on flags. +func (db *BlockDb) GetBlock( + ctx context.Context, + slot uint64, + root []byte, + flags types.BlockDataFlags, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { + return db.engine.GetBlock(ctx, slot, root, flags, parseBlock, parsePayload) +} + +// GetStoredComponents returns which components exist for a block. +func (db *BlockDb) GetStoredComponents(ctx context.Context, slot uint64, root []byte) (types.BlockDataFlags, error) { + return db.engine.GetStoredComponents(ctx, slot, root) } -func (db *BlockDb) AddBlock(ctx context.Context, slot uint64, root []byte, header_ver uint64, header_data []byte, body_ver uint64, body_data []byte) (bool, error) { +// AddBlock stores block data. Returns (added, updated, error). 
+func (db *BlockDb) AddBlock( + ctx context.Context, + slot uint64, + root []byte, + headerVer uint64, + headerData []byte, + bodyVer uint64, + bodyData []byte, + payloadVer uint64, + payloadData []byte, + balVer uint64, + balData []byte, +) (bool, bool, error) { return db.engine.AddBlock(ctx, slot, root, func() (*types.BlockData, error) { return &types.BlockData{ - HeaderVersion: header_ver, - HeaderData: header_data, - BodyVersion: body_ver, - BodyData: body_data, + HeaderVersion: headerVer, + HeaderData: headerData, + BodyVersion: bodyVer, + BodyData: bodyData, + PayloadVersion: payloadVer, + PayloadData: payloadData, + BalVersion: balVer, + BalData: balData, }, nil }) } -func (db *BlockDb) AddBlockWithCallback(ctx context.Context, slot uint64, root []byte, dataCb func() (*types.BlockData, error)) (bool, error) { +// AddBlockWithCallback stores block data using a callback for deferred data loading. +// Returns (added, updated, error). +func (db *BlockDb) AddBlockWithCallback( + ctx context.Context, + slot uint64, + root []byte, + dataCb func() (*types.BlockData, error), +) (bool, bool, error) { return db.engine.AddBlock(ctx, slot, root, dataCb) } diff --git a/blockdb/pebble/cleanup.go b/blockdb/pebble/cleanup.go new file mode 100644 index 000000000..5a3cf787e --- /dev/null +++ b/blockdb/pebble/cleanup.go @@ -0,0 +1,439 @@ +package pebble + +import ( + "context" + "encoding/binary" + "sort" + "sync" + "time" + + "github.com/cockroachdb/pebble" + "github.com/sirupsen/logrus" + + "github.com/ethpandaops/dora/blockdb/types" + dtypes "github.com/ethpandaops/dora/types" +) + +const ( + // KeyNamespaceLRU is the namespace for LRU tracking data. + KeyNamespaceLRU uint16 = 2 + + // LRU value format: [headerAccess (8B)] [bodyAccess (8B)] [payloadAccess (8B)] [balAccess (8B)] + // Each access time is a Unix nanosecond timestamp, 0 means never accessed. + lruValueSize = 32 + + // Maximum number of LRU updates to buffer before forcing a flush. 
+ maxLRUBufferSize = 1000 +) + +// CacheCleanup manages background cleanup of cached data. +type CacheCleanup struct { + engine *PebbleEngine + config dtypes.PebbleBlockDBConfig + logger logrus.FieldLogger + + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + // LRU update buffer + lruMu sync.Mutex + lruBuffer map[string]*lruUpdate // root hex -> update +} + +// lruUpdate holds pending LRU timestamp updates for a block. +type lruUpdate struct { + root []byte + headerAccess int64 // Unix nano, 0 = no update + bodyAccess int64 + payloadAccess int64 + balAccess int64 +} + +// NewCacheCleanup creates a new cache cleanup manager. +func NewCacheCleanup(engine *PebbleEngine, logger logrus.FieldLogger) *CacheCleanup { + ctx, cancel := context.WithCancel(context.Background()) + + return &CacheCleanup{ + engine: engine, + config: engine.GetConfig(), + logger: logger.WithField("component", "pebble-cleanup"), + ctx: ctx, + cancel: cancel, + lruBuffer: make(map[string]*lruUpdate, 100), + } +} + +// Start begins the background cleanup loop. +func (c *CacheCleanup) Start() { + if c.config.CleanupInterval == 0 { + c.logger.Info("cleanup disabled (interval is 0)") + return + } + + c.wg.Add(1) + go c.runCleanupLoop() +} + +// Stop stops the background cleanup loop. +func (c *CacheCleanup) Stop() { + c.cancel() + c.wg.Wait() + + // Final flush of LRU buffer + c.FlushLRU() +} + +// runCleanupLoop runs the periodic cleanup. +func (c *CacheCleanup) runCleanupLoop() { + defer c.wg.Done() + + ticker := time.NewTicker(c.config.CleanupInterval) + defer ticker.Stop() + + for { + select { + case <-c.ctx.Done(): + return + case <-ticker.C: + c.FlushLRU() + c.runCleanup() + } + } +} + +// RecordAccess records an access for LRU tracking. Buffered until flush. 
+func (c *CacheCleanup) RecordAccess(root []byte, flags types.BlockDataFlags) { + c.lruMu.Lock() + defer c.lruMu.Unlock() + + key := string(root) + now := time.Now().UnixNano() + + update, exists := c.lruBuffer[key] + if !exists { + rootCopy := make([]byte, len(root)) + copy(rootCopy, root) + update = &lruUpdate{root: rootCopy} + c.lruBuffer[key] = update + } + + if flags.Has(types.BlockDataFlagHeader) { + update.headerAccess = now + } + if flags.Has(types.BlockDataFlagBody) { + update.bodyAccess = now + } + if flags.Has(types.BlockDataFlagPayload) { + update.payloadAccess = now + } + if flags.Has(types.BlockDataFlagBal) { + update.balAccess = now + } + + // Force flush if buffer is too large + if len(c.lruBuffer) >= maxLRUBufferSize { + c.flushLRULocked() + } +} + +// FlushLRU flushes buffered LRU updates to Pebble. +func (c *CacheCleanup) FlushLRU() { + c.lruMu.Lock() + defer c.lruMu.Unlock() + c.flushLRULocked() +} + +// flushLRULocked flushes LRU buffer (must hold lruMu). +func (c *CacheCleanup) flushLRULocked() { + if len(c.lruBuffer) == 0 { + return + } + + db := c.engine.GetDB() + batch := db.NewBatch() + + for _, update := range c.lruBuffer { + key := makeLRUKey(update.root) + + // Read existing LRU data + existing := make([]byte, lruValueSize) + if res, closer, err := db.Get(key); err == nil { + if len(res) >= lruValueSize { + copy(existing, res) + } + closer.Close() + } + + // Merge updates (only update non-zero values) + value := make([]byte, lruValueSize) + copy(value, existing) + + if update.headerAccess > 0 { + binary.BigEndian.PutUint64(value[0:8], uint64(update.headerAccess)) + } + if update.bodyAccess > 0 { + binary.BigEndian.PutUint64(value[8:16], uint64(update.bodyAccess)) + } + if update.payloadAccess > 0 { + binary.BigEndian.PutUint64(value[16:24], uint64(update.payloadAccess)) + } + if update.balAccess > 0 { + binary.BigEndian.PutUint64(value[24:32], uint64(update.balAccess)) + } + + batch.Set(key, value, nil) + } + + if err := 
batch.Commit(nil); err != nil { + c.logger.Errorf("failed to flush LRU updates: %v", err) + } + batch.Close() + + // Clear buffer + c.lruBuffer = make(map[string]*lruUpdate, 100) +} + +// makeLRUKey creates the key for LRU data. +func makeLRUKey(root []byte) []byte { + key := make([]byte, 2+len(root)) + binary.BigEndian.PutUint16(key[:2], KeyNamespaceLRU) + copy(key[2:], root) + return key +} + +// runCleanup performs cleanup for all configured component types. +func (c *CacheCleanup) runCleanup() { + c.logger.Debug("starting cache cleanup") + + componentConfigs := map[uint16]*dtypes.BlockDbRetentionConfig{ + BlockTypeHeader: &c.config.HeaderRetention, + BlockTypeBody: &c.config.BodyRetention, + BlockTypePayload: &c.config.PayloadRetention, + BlockTypeBal: &c.config.BalRetention, + } + + for blockType, config := range componentConfigs { + if config == nil || !config.Enabled { + continue + } + + switch config.CleanupMode { + case "age": + c.cleanupByAge(blockType, config.RetentionTime) + case "lru": + c.cleanupByLRU(blockType, config.MaxSize*1024*1024) // Convert MB to bytes + } + } +} + +// cleanupByAge removes entries older than the retention time based on storage timestamp. 
+func (c *CacheCleanup) cleanupByAge(blockType uint16, retention time.Duration) { + if retention == 0 { + return + } + + cutoff := time.Now().Add(-retention) + deleted := 0 + + db := c.engine.GetDB() + iter, err := db.NewIter(&pebble.IterOptions{}) + if err != nil { + c.logger.Errorf("failed to create iterator: %v", err) + return + } + defer iter.Close() + + batch := db.NewBatch() + defer batch.Close() + + for iter.First(); iter.Valid(); iter.Next() { + key := iter.Key() + + // Check if this key is in the block namespace + if len(key) < 36 { // 2 (namespace) + 32 (root) + 2 (type) + continue + } + + namespace := binary.BigEndian.Uint16(key[:2]) + if namespace != KeyNamespaceBlock { + continue + } + + keyType := binary.BigEndian.Uint16(key[len(key)-2:]) + if keyType != blockType { + continue + } + + // Check timestamp from value (stored at offset 8) + value := iter.Value() + if len(value) < valueHeaderSize { + continue + } + + timestamp := time.Unix(0, int64(binary.BigEndian.Uint64(value[8:16]))) + if timestamp.Before(cutoff) { + keyCopy := make([]byte, len(key)) + copy(keyCopy, key) + batch.Delete(keyCopy, nil) + deleted++ + } + } + + if deleted > 0 { + if err := batch.Commit(nil); err != nil { + c.logger.Errorf("failed to commit age cleanup batch: %v", err) + } else { + c.logger.Infof("cleaned up %d entries for block type %d (age-based)", deleted, blockType) + } + } +} + +// lruEntry represents an entry for LRU cleanup sorting. +type lruEntry struct { + root []byte + key []byte + size int64 + lastAccess int64 +} + +// cleanupByLRU removes least recently used entries when size exceeds limit. 
+func (c *CacheCleanup) cleanupByLRU(blockType uint16, maxSize int64) { + if maxSize == 0 { + return + } + + db := c.engine.GetDB() + + // First pass: collect all entries with their sizes and LRU timestamps + entries := make([]*lruEntry, 0, 1000) + var totalSize int64 + + iter, err := db.NewIter(&pebble.IterOptions{}) + if err != nil { + c.logger.Errorf("failed to create iterator: %v", err) + return + } + + // Scan block entries + for iter.First(); iter.Valid(); iter.Next() { + key := iter.Key() + + if len(key) < 36 { + continue + } + + namespace := binary.BigEndian.Uint16(key[:2]) + if namespace != KeyNamespaceBlock { + continue + } + + keyType := binary.BigEndian.Uint16(key[len(key)-2:]) + if keyType != blockType { + continue + } + + // Extract root from key + root := key[2 : len(key)-2] + value := iter.Value() + size := int64(len(value)) + totalSize += size + + // Get LRU timestamp for this entry + lastAccess := c.getLRUTimestamp(db, root, blockType) + + keyCopy := make([]byte, len(key)) + copy(keyCopy, key) + rootCopy := make([]byte, len(root)) + copy(rootCopy, root) + + entries = append(entries, &lruEntry{ + root: rootCopy, + key: keyCopy, + size: size, + lastAccess: lastAccess, + }) + } + iter.Close() + + // Check if we need to clean up + if totalSize <= maxSize { + return + } + + // Sort by last access time (oldest first, 0 = never accessed = oldest) + sort.Slice(entries, func(i, j int) bool { + return entries[i].lastAccess < entries[j].lastAccess + }) + + // Delete oldest entries until we're under the limit + batch := db.NewBatch() + defer batch.Close() + + deleted := 0 + freedSize := int64(0) + targetFree := totalSize - maxSize + + for _, entry := range entries { + if freedSize >= targetFree { + break + } + + batch.Delete(entry.key, nil) + freedSize += entry.size + deleted++ + } + + if deleted > 0 { + if err := batch.Commit(nil); err != nil { + c.logger.Errorf("failed to commit LRU cleanup batch: %v", err) + } else { + c.logger.Infof("cleaned up %d entries 
for block type %d (LRU-based, freed %d bytes)", + deleted, blockType, freedSize) + } + } +} + +// getLRUTimestamp retrieves the LRU timestamp for a specific component. +func (c *CacheCleanup) getLRUTimestamp(db *pebble.DB, root []byte, blockType uint16) int64 { + key := makeLRUKey(root) + + res, closer, err := db.Get(key) + if err != nil { + return 0 // Never accessed + } + defer closer.Close() + + if len(res) < lruValueSize { + return 0 + } + + // Extract timestamp based on block type + var offset int + switch blockType { + case BlockTypeHeader: + offset = 0 + case BlockTypeBody: + offset = 8 + case BlockTypePayload: + offset = 16 + case BlockTypeBal: + offset = 24 + default: + return 0 + } + + return int64(binary.BigEndian.Uint64(res[offset : offset+8])) +} + +// DeleteLRU removes LRU data for a block (call when deleting block data). +func (c *CacheCleanup) DeleteLRU(root []byte) { + db := c.engine.GetDB() + key := makeLRUKey(root) + db.Delete(key, nil) + + // Also remove from buffer + c.lruMu.Lock() + delete(c.lruBuffer, string(root)) + c.lruMu.Unlock() +} diff --git a/blockdb/pebble/pebble.go b/blockdb/pebble/pebble.go index d8619201e..1da3d6b8c 100644 --- a/blockdb/pebble/pebble.go +++ b/blockdb/pebble/pebble.go @@ -3,6 +3,8 @@ package pebble import ( "context" "encoding/binary" + "fmt" + "time" "github.com/cockroachdb/pebble" "github.com/ethpandaops/dora/blockdb/types" @@ -14,12 +16,18 @@ const ( ) const ( - BlockTypeHeader uint16 = 1 - BlockTypeBody uint16 = 2 + BlockTypeHeader uint16 = 1 + BlockTypeBody uint16 = 2 + BlockTypePayload uint16 = 3 + BlockTypeBal uint16 = 4 ) +// Value format: [version (8 bytes)] [timestamp (8 bytes)] [data] +const valueHeaderSize = 16 + type PebbleEngine struct { - db *pebble.DB + db *pebble.DB + config dtypes.PebbleBlockDBConfig } func NewPebbleEngine(config dtypes.PebbleBlockDBConfig) (types.BlockDbEngine, error) { @@ -34,153 +42,259 @@ func NewPebbleEngine(config dtypes.PebbleBlockDBConfig) (types.BlockDbEngine, er } return 
&PebbleEngine{
		db:     db,
		config: config,
	}, nil
}

// Close closes the underlying Pebble database.
func (e *PebbleEngine) Close() error {
	return e.db.Close()
}

// makeKey builds the storage key for a block component:
// [namespace (2B)] [root] [component type (2B)].
func makeKey(root []byte, blockType uint16) []byte {
	key := make([]byte, 2+len(root)+2)
	binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock)
	copy(key[2:], root)
	binary.BigEndian.PutUint16(key[2+len(root):], blockType)
	return key
}

// getComponent retrieves a single component from the database.
// Returns (data, version, timestamp, error); data is nil when not found.
func (e *PebbleEngine) getComponent(root []byte, blockType uint16) ([]byte, uint64, time.Time, error) {
	key := makeKey(root, blockType)

	res, closer, err := e.db.Get(key)
	if err == pebble.ErrNotFound {
		return nil, 0, time.Time{}, nil
	}
	if err != nil {
		return nil, 0, time.Time{}, err
	}
	defer closer.Close()

	// Values shorter than the fixed header are treated as missing.
	if len(res) < valueHeaderSize {
		return nil, 0, time.Time{}, nil
	}

	version := binary.BigEndian.Uint64(res[:8])
	timestamp := time.Unix(0, int64(binary.BigEndian.Uint64(res[8:16])))

	// Copy the payload out: res is only valid until closer.Close().
	data := make([]byte, len(res)-valueHeaderSize)
	copy(data, res[valueHeaderSize:])

	return data, version, timestamp, nil
}

// setComponent stores a single component
in the database. +func (e *PebbleEngine) setComponent(root []byte, blockType uint16, version uint64, data []byte) error { + key := makeKey(root, blockType) - res, closer, err := e.db.Get(key) - if err != nil && err != pebble.ErrNotFound { - return nil, 0, err - } - defer closer.Close() + value := make([]byte, valueHeaderSize+len(data)) + binary.BigEndian.PutUint64(value[:8], version) + binary.BigEndian.PutUint64(value[8:16], uint64(time.Now().UnixNano())) + copy(value[valueHeaderSize:], data) - if err == pebble.ErrNotFound || len(res) == 0 { - return nil, 0, nil + return e.db.Set(key, value, nil) +} + +// componentExists checks if a component exists in the database. +func (e *PebbleEngine) componentExists(root []byte, blockType uint16) bool { + key := makeKey(root, blockType) + + res, closer, err := e.db.Get(key) + if err == nil && len(res) >= valueHeaderSize { + closer.Close() + return true } + return false +} - version := binary.BigEndian.Uint64(res[:8]) - block := res[8:] +// GetStoredComponents returns which components exist for a block. +func (e *PebbleEngine) GetStoredComponents(_ context.Context, _ uint64, root []byte) (types.BlockDataFlags, error) { + var flags types.BlockDataFlags - body, err := parser(version, block) - if err != nil { - return nil, 0, err + if e.componentExists(root, BlockTypeHeader) { + flags |= types.BlockDataFlagHeader + } + if e.componentExists(root, BlockTypeBody) { + flags |= types.BlockDataFlagBody + } + if e.componentExists(root, BlockTypePayload) { + flags |= types.BlockDataFlagPayload + } + if e.componentExists(root, BlockTypeBal) { + flags |= types.BlockDataFlagBal } - return body, version, nil + return flags, nil } -func (e *PebbleEngine) GetBlock(_ context.Context, _ uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*types.BlockData, error) { - header, header_ver, err := e.getBlockHeader(root) - if err != nil { - return nil, err +// GetBlock retrieves block data with selective loading based on flags. 
+// Note: LRU access tracking should be done by the caller via CacheCleanup.RecordAccess() +// to avoid expensive read-modify-write operations on every access. +func (e *PebbleEngine) GetBlock( + _ context.Context, + _ uint64, + root []byte, + flags types.BlockDataFlags, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { + blockData := &types.BlockData{} + + // Load header if requested + if flags.Has(types.BlockDataFlagHeader) { + data, version, _, err := e.getComponent(root, BlockTypeHeader) + if err != nil { + return nil, fmt.Errorf("failed to get header: %w", err) + } + if data != nil { + blockData.HeaderVersion = version + blockData.HeaderData = data + } } - blockData := &types.BlockData{ - HeaderVersion: header_ver, - HeaderData: header, - } + // Load body if requested + if flags.Has(types.BlockDataFlagBody) { + data, version, _, err := e.getComponent(root, BlockTypeBody) + if err != nil { + return nil, fmt.Errorf("failed to get body: %w", err) + } - if parseBlock == nil { - parseBlock = func(version uint64, block []byte) (interface{}, error) { - blockData.BodyData = make([]byte, len(block)) - copy(blockData.BodyData, block) - return nil, nil + if data != nil { + blockData.BodyVersion = version + if parseBlock != nil { + body, err := parseBlock(version, data) + if err != nil { + return nil, fmt.Errorf("failed to parse body: %w", err) + } + blockData.Body = body + } else { + blockData.BodyData = data + } } } - body, body_ver, err := e.getBlockBody(root, parseBlock) - if err != nil { - return nil, err - } + // Load payload if requested + if flags.Has(types.BlockDataFlagPayload) { + data, version, _, err := e.getComponent(root, BlockTypePayload) + if err != nil { + return nil, fmt.Errorf("failed to get payload: %w", err) + } - blockData.Body = body - blockData.BodyVersion = body_ver + if data != nil { + blockData.PayloadVersion = version + if parsePayload != nil { + payload, err := 
parsePayload(version, data) + if err != nil { + return nil, fmt.Errorf("failed to parse payload: %w", err) + } + blockData.Payload = payload + } else { + blockData.PayloadData = data + } + } + } - return blockData, nil -} + // Load BAL if requested + if flags.Has(types.BlockDataFlagBal) { + data, version, _, err := e.getComponent(root, BlockTypeBal) + if err != nil { + return nil, fmt.Errorf("failed to get BAL: %w", err) + } -func (e *PebbleEngine) checkBlock(key []byte) bool { - res, closer, err := e.db.Get(key) - if err == nil && len(res) > 0 { - closer.Close() - return true + if data != nil { + blockData.BalVersion = version + blockData.BalData = data + } } - return false + return blockData, nil } -func (e *PebbleEngine) addBlockHeader(key []byte, version uint64, header []byte) error { - data := make([]byte, 8+len(header)) - binary.BigEndian.PutUint64(data[:8], version) +// AddBlock stores block data. Returns (added, updated, error). +// - added: true if a new block was created +// - updated: true if an existing block was updated with new components +func (e *PebbleEngine) AddBlock( + _ context.Context, + _ uint64, + root []byte, + dataCb func() (*types.BlockData, error), +) (bool, bool, error) { + // Check what components already exist + existingFlags, err := e.GetStoredComponents(context.Background(), 0, root) + if err != nil { + return false, false, fmt.Errorf("failed to check existing components: %w", err) + } - return e.db.Set(key, data, nil) -} + // Get the new data + blockData, err := dataCb() + if err != nil { + return false, false, err + } -func (e *PebbleEngine) addBlockBody(root []byte, version uint64, block []byte) error { - key := make([]byte, 2+len(root)+2) - binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock) - copy(key[2:], root) - binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeBody) + // Determine what new components we have + var newFlags types.BlockDataFlags + if len(blockData.HeaderData) > 0 { + newFlags |= 
types.BlockDataFlagHeader + } + if len(blockData.BodyData) > 0 { + newFlags |= types.BlockDataFlagBody + } + if blockData.PayloadVersion != 0 && len(blockData.PayloadData) > 0 { + newFlags |= types.BlockDataFlagPayload + } + if blockData.BalVersion != 0 && len(blockData.BalData) > 0 { + newFlags |= types.BlockDataFlagBal + } - data := make([]byte, 8+len(block)) - binary.BigEndian.PutUint64(data[:8], version) - copy(data[8:], block) + // Calculate components to add (new components not in existing) + toAdd := newFlags &^ existingFlags - return e.db.Set(key, data, nil) -} + if toAdd == 0 { + // Nothing new to add + return false, false, nil + } -func (e *PebbleEngine) AddBlock(_ context.Context, _ uint64, root []byte, dataCb func() (*types.BlockData, error)) (bool, error) { - key := make([]byte, 2+len(root)+2) - binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock) - copy(key[2:], root) - binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeHeader) + isNew := existingFlags == 0 + isUpdated := !isNew - if e.checkBlock(key) { - return false, nil + // Store new components + if toAdd.Has(types.BlockDataFlagHeader) { + if err := e.setComponent(root, BlockTypeHeader, blockData.HeaderVersion, blockData.HeaderData); err != nil { + return false, false, fmt.Errorf("failed to store header: %w", err) + } } - blockData, err := dataCb() - if err != nil { - return false, err + if toAdd.Has(types.BlockDataFlagBody) { + if err := e.setComponent(root, BlockTypeBody, blockData.BodyVersion, blockData.BodyData); err != nil { + return false, false, fmt.Errorf("failed to store body: %w", err) + } } - err = e.addBlockHeader(key, blockData.HeaderVersion, blockData.HeaderData) - if err != nil { - return false, err + if toAdd.Has(types.BlockDataFlagPayload) { + if err := e.setComponent(root, BlockTypePayload, blockData.PayloadVersion, blockData.PayloadData); err != nil { + return false, false, fmt.Errorf("failed to store payload: %w", err) + } } - err = e.addBlockBody(root, 
blockData.BodyVersion, blockData.BodyData) - if err != nil { - return false, err + if toAdd.Has(types.BlockDataFlagBal) { + if err := e.setComponent(root, BlockTypeBal, blockData.BalVersion, blockData.BalData); err != nil { + return false, false, fmt.Errorf("failed to store BAL: %w", err) + } } - return true, nil + return isNew, isUpdated, nil +} + +// GetDB returns the underlying Pebble database for cleanup operations. +func (e *PebbleEngine) GetDB() *pebble.DB { + return e.db +} + +// GetConfig returns the engine configuration. +func (e *PebbleEngine) GetConfig() dtypes.PebbleBlockDBConfig { + return e.config } diff --git a/blockdb/s3/format.go b/blockdb/s3/format.go new file mode 100644 index 000000000..79fa2c04a --- /dev/null +++ b/blockdb/s3/format.go @@ -0,0 +1,202 @@ +package s3 + +import ( + "encoding/binary" + "fmt" + + "github.com/attestantio/go-eth2-client/spec" + + "github.com/ethpandaops/dora/blockdb/types" +) + +// Object format versions: +// v1: header + body (pre-gloas blocks) +// v2: header + body + payload + bal (gloas+ blocks, payload/BAL introduced in same fork) +// +// Note: Both payload and BAL may be empty (length 0), but body is always required. + +// Metadata sizes by version +const ( + metadataSizeV1 = 16 // 4 (version) + 4 (headerLen) + 4 (bodyVer) + 4 (bodyLen) + metadataSizeV2 = 32 // v1 + 4 (payloadVer) + 4 (payloadLen) + 4 (balVer) + 4 (balLen) + + // Maximum metadata size for initial read + maxMetadataSize = 64 +) + +// objectMetadata represents the metadata for all format versions. +type objectMetadata struct { + ObjVersion uint32 + + // Header (always present) + HeaderLength uint32 + + // Body (always required) + BodyVersion uint32 + BodyLength uint32 + + // Payload (v2+, may be empty) + PayloadVersion uint32 + PayloadLength uint32 + + // BAL (v2+, may be empty) + BalVersion uint32 + BalLength uint32 +} + +// metadataSize returns the metadata size for this object. 
+func (m *objectMetadata) metadataSize() int { + switch m.ObjVersion { + case 1: + return metadataSizeV1 + case 2: + return metadataSizeV2 + default: + return metadataSizeV2 + } +} + +// headerOffset returns the byte offset of the header data. +func (m *objectMetadata) headerOffset() int { + return m.metadataSize() +} + +// bodyOffset returns the byte offset of the body data. +func (m *objectMetadata) bodyOffset() int { + return m.metadataSize() + int(m.HeaderLength) +} + +// payloadOffset returns the byte offset of the payload data. +func (m *objectMetadata) payloadOffset() int { + return m.metadataSize() + int(m.HeaderLength) + int(m.BodyLength) +} + +// balOffset returns the byte offset of the BAL data. +func (m *objectMetadata) balOffset() int { + return m.metadataSize() + int(m.HeaderLength) + int(m.BodyLength) + int(m.PayloadLength) +} + +// storedFlags returns which components are stored in this object. +func (m *objectMetadata) storedFlags() types.BlockDataFlags { + var flags types.BlockDataFlags + + if m.HeaderLength > 0 { + flags |= types.BlockDataFlagHeader + } + if m.BodyLength > 0 { + flags |= types.BlockDataFlagBody + } + if m.PayloadLength > 0 && m.ObjVersion >= 2 { + flags |= types.BlockDataFlagPayload + } + if m.BalLength > 0 && m.ObjVersion >= 2 { + flags |= types.BlockDataFlagBal + } + + return flags +} + +// readObjectMetadata reads metadata from any format version. 
+func readObjectMetadata(data []byte) (*objectMetadata, error) { + if len(data) < 4 { + return nil, fmt.Errorf("data too short for metadata version") + } + + version := binary.BigEndian.Uint32(data[:4]) + meta := &objectMetadata{ObjVersion: version} + + switch version { + case 1: + if len(data) < metadataSizeV1 { + return nil, fmt.Errorf("data too short for v1 metadata: need %d, got %d", metadataSizeV1, len(data)) + } + meta.HeaderLength = binary.BigEndian.Uint32(data[4:8]) + meta.BodyVersion = binary.BigEndian.Uint32(data[8:12]) + meta.BodyLength = binary.BigEndian.Uint32(data[12:16]) + + case 2: + if len(data) < metadataSizeV2 { + return nil, fmt.Errorf("data too short for v2 metadata: need %d, got %d", metadataSizeV2, len(data)) + } + meta.HeaderLength = binary.BigEndian.Uint32(data[4:8]) + meta.BodyVersion = binary.BigEndian.Uint32(data[8:12]) + meta.BodyLength = binary.BigEndian.Uint32(data[12:16]) + meta.PayloadVersion = binary.BigEndian.Uint32(data[16:20]) + meta.PayloadLength = binary.BigEndian.Uint32(data[20:24]) + meta.BalVersion = binary.BigEndian.Uint32(data[24:28]) + meta.BalLength = binary.BigEndian.Uint32(data[28:32]) + + default: + return nil, fmt.Errorf("unsupported object version: %d", version) + } + + return meta, nil +} + +// writeObjectMetadata creates metadata bytes for the given BlockData. +// Uses v1 format for pre-gloas blocks, v2 for gloas+ blocks. 
+func writeObjectMetadata(data *types.BlockData) []byte { + // Use v2 format only for gloas+ blocks (which can have payload/BAL) + if data.BodyVersion >= uint64(spec.DataVersionGloas) { + meta := make([]byte, metadataSizeV2) + binary.BigEndian.PutUint32(meta[0:4], 2) + binary.BigEndian.PutUint32(meta[4:8], uint32(len(data.HeaderData))) + binary.BigEndian.PutUint32(meta[8:12], uint32(data.BodyVersion)) + binary.BigEndian.PutUint32(meta[12:16], uint32(len(data.BodyData))) + binary.BigEndian.PutUint32(meta[16:20], uint32(data.PayloadVersion)) + binary.BigEndian.PutUint32(meta[20:24], uint32(len(data.PayloadData))) + binary.BigEndian.PutUint32(meta[24:28], uint32(data.BalVersion)) + binary.BigEndian.PutUint32(meta[28:32], uint32(len(data.BalData))) + return meta + } + + // Use v1 format for pre-gloas blocks + meta := make([]byte, metadataSizeV1) + binary.BigEndian.PutUint32(meta[0:4], 1) + binary.BigEndian.PutUint32(meta[4:8], uint32(len(data.HeaderData))) + binary.BigEndian.PutUint32(meta[8:12], uint32(data.BodyVersion)) + binary.BigEndian.PutUint32(meta[12:16], uint32(len(data.BodyData))) + return meta +} + +// getDataRange calculates the single byte range spanning all requested components. +// Returns (start, end) where end is inclusive. Returns (-1, -1) if no data to fetch. 
+func (m *objectMetadata) getDataRange(flags types.BlockDataFlags) (int64, int64) { + var start int64 = -1 + var end int64 = -1 + + // Check each component in order (they're stored sequentially) + if flags.Has(types.BlockDataFlagHeader) && m.HeaderLength > 0 { + start = int64(m.headerOffset()) + end = start + int64(m.HeaderLength) - 1 + } + + if flags.Has(types.BlockDataFlagBody) && m.BodyLength > 0 { + bodyStart := int64(m.bodyOffset()) + bodyEnd := bodyStart + int64(m.BodyLength) - 1 + if start < 0 { + start = bodyStart + } + end = bodyEnd + } + + if flags.Has(types.BlockDataFlagPayload) && m.PayloadLength > 0 && m.ObjVersion >= 2 { + payloadStart := int64(m.payloadOffset()) + payloadEnd := payloadStart + int64(m.PayloadLength) - 1 + if start < 0 { + start = payloadStart + } + end = payloadEnd + } + + if flags.Has(types.BlockDataFlagBal) && m.BalLength > 0 && m.ObjVersion >= 2 { + balStart := int64(m.balOffset()) + balEnd := balStart + int64(m.BalLength) - 1 + if start < 0 { + start = balStart + } + end = balEnd + } + + return start, end +} diff --git a/blockdb/s3/s3store.go b/blockdb/s3/s3store.go index 564dc9a80..889018b19 100644 --- a/blockdb/s3/s3store.go +++ b/blockdb/s3/s3store.go @@ -3,13 +3,13 @@ package s3 import ( "bytes" "context" - "encoding/binary" "encoding/hex" "fmt" "io" "path" "strings" + "github.com/attestantio/go-eth2-client/spec" "github.com/ethpandaops/dora/blockdb/types" dtypes "github.com/ethpandaops/dora/types" "github.com/minio/minio-go/v7" @@ -20,6 +20,10 @@ type S3Engine struct { client *minio.Client bucket string pathPrefix string + config dtypes.S3BlockDBConfig + + // Range request support (configured via EnableRangeRequests) + rangeRequestsEnabled bool } func NewS3Engine(config dtypes.S3BlockDBConfig) (types.BlockDbEngine, error) { @@ -42,9 +46,11 @@ func NewS3Engine(config dtypes.S3BlockDBConfig) (types.BlockDbEngine, error) { } engine := &S3Engine{ - client: client, - bucket: config.Bucket, - pathPrefix: 
strings.TrimPrefix(config.Path, "/"), + client: client, + bucket: config.Bucket, + pathPrefix: strings.TrimPrefix(config.Path, "/"), + config: config, + rangeRequestsEnabled: config.EnableRangeRequests, } return engine, nil @@ -59,158 +65,457 @@ func (e *S3Engine) getObjectKey(root []byte, slot uint64) string { return path.Join(e.pathPrefix, fmt.Sprintf("%06d", slot/10000), fmt.Sprintf("%010d_%s", slot, rootHex)) } -type objectMetadata struct { - objVersion uint32 - headerLength uint32 - bodyVersion uint32 - bodyLength uint32 +// GetStoredComponents returns which components exist for a block by reading metadata. +func (e *S3Engine) GetStoredComponents(ctx context.Context, slot uint64, root []byte) (types.BlockDataFlags, error) { + key := e.getObjectKey(root, slot) + + // Read just the metadata + meta, err := e.readMetadata(ctx, key) + if err != nil { + return 0, err + } + if meta == nil { + return 0, nil + } + + return meta.storedFlags(), nil } -func (e *S3Engine) readObjectMetadata(data []byte) (*objectMetadata, int, error) { - metadataLength := 4 - metadata := &objectMetadata{ - objVersion: binary.BigEndian.Uint32(data[:4]), +// readMetadata reads object metadata using range request if enabled, otherwise full read. 
+func (e *S3Engine) readMetadata(ctx context.Context, key string) (*objectMetadata, error) { + if e.config.EnableRangeRequests { + meta, err := e.readMetadataWithRange(ctx, key) + if err == nil { + return meta, nil + } + // Fall through to full read on error + } + + // Full read fallback + obj, err := e.client.GetObject(ctx, e.bucket, key, minio.GetObjectOptions{}) + if err != nil { + errResp := minio.ToErrorResponse(err) + if errResp.Code == "NoSuchKey" { + return nil, nil + } + return nil, fmt.Errorf("failed to get object: %w", err) } + defer obj.Close() - switch metadata.objVersion { - case 1: - metadata.headerLength = binary.BigEndian.Uint32(data[4:8]) - metadata.bodyVersion = binary.BigEndian.Uint32(data[8:12]) - metadata.bodyLength = binary.BigEndian.Uint32(data[12:16]) - metadataLength += 12 + buf := make([]byte, maxMetadataSize) + n, err := obj.Read(buf) + if (err != nil && err != io.EOF) || n == 0 { + return nil, fmt.Errorf("failed to read metadata: %w", err) } - return metadata, metadataLength, nil + return readObjectMetadata(buf[:n]) } -func (e *S3Engine) writeObjectMetadata(metadata *objectMetadata) []byte { - data := make([]byte, 4, 16) - binary.BigEndian.PutUint32(data, metadata.objVersion) +// readMetadataWithRange reads metadata using HTTP Range request. 
+func (e *S3Engine) readMetadataWithRange(ctx context.Context, key string) (*objectMetadata, error) { + opts := minio.GetObjectOptions{} + if err := opts.SetRange(0, int64(maxMetadataSize-1)); err != nil { + return nil, err + } - switch metadata.objVersion { - case 1: - data = binary.BigEndian.AppendUint32(data, metadata.headerLength) - data = binary.BigEndian.AppendUint32(data, metadata.bodyVersion) - data = binary.BigEndian.AppendUint32(data, metadata.bodyLength) + obj, err := e.client.GetObject(ctx, e.bucket, key, opts) + if err != nil { + errResp := minio.ToErrorResponse(err) + if errResp.Code == "NoSuchKey" { + return nil, nil + } + return nil, fmt.Errorf("failed to get object with range: %w", err) + } + defer obj.Close() + + buf := make([]byte, maxMetadataSize) + n, err := obj.Read(buf) + if err != nil && err != io.EOF { + return nil, fmt.Errorf("failed to read range: %w", err) } - return data + return readObjectMetadata(buf[:n]) } -func (e *S3Engine) GetBlock(ctx context.Context, slot uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*types.BlockData, error) { +// GetBlock retrieves block data with selective loading based on flags. +func (e *S3Engine) GetBlock( + ctx context.Context, + slot uint64, + root []byte, + flags types.BlockDataFlags, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { key := e.getObjectKey(root, slot) + // Try range-based loading if enabled + if e.config.EnableRangeRequests && e.rangeRequestsEnabled { + data, err := e.getBlockWithRanges(ctx, key, flags, parseBlock, parsePayload) + if err == nil { + return data, nil + } + // Fall through to full read on error + } + + // Full read fallback + return e.getBlockFull(ctx, key, flags, parseBlock, parsePayload) +} + +// getBlockWithRanges uses a single range request for selective loading. +// Makes exactly 2 GET requests: one for metadata, one for all requested data. 
+func (e *S3Engine) getBlockWithRanges( + ctx context.Context, + key string, + flags types.BlockDataFlags, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { + // First, get metadata (1 GET request) + meta, err := e.readMetadataWithRange(ctx, key) + if err != nil { + return nil, err + } + if meta == nil { + return nil, nil + } + + // Calculate the single byte range spanning all requested components + rangeStart, rangeEnd := meta.getDataRange(flags) + if rangeStart < 0 { + // No data to fetch + return &types.BlockData{ + HeaderVersion: uint64(meta.ObjVersion), + BodyVersion: uint64(meta.BodyVersion), + PayloadVersion: uint64(meta.PayloadVersion), + BalVersion: uint64(meta.BalVersion), + }, nil + } + + // Fetch all requested data in a single GET request + opts := minio.GetObjectOptions{} + if err := opts.SetRange(rangeStart, rangeEnd); err != nil { + return nil, err + } + + obj, err := e.client.GetObject(ctx, e.bucket, key, opts) + if err != nil { + return nil, fmt.Errorf("failed to get object range: %w", err) + } + defer obj.Close() + + data, err := io.ReadAll(obj) + if err != nil { + return nil, fmt.Errorf("failed to read object range: %w", err) + } + + // Extract requested components from the fetched data + return e.extractComponents(meta, flags, data, rangeStart, parseBlock, parsePayload) +} + +// extractComponents extracts requested components from fetched data. 
+func (e *S3Engine) extractComponents( + meta *objectMetadata, + flags types.BlockDataFlags, + data []byte, + dataStartOffset int64, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { + blockData := &types.BlockData{ + HeaderVersion: uint64(meta.ObjVersion), + BodyVersion: uint64(meta.BodyVersion), + PayloadVersion: uint64(meta.PayloadVersion), + BalVersion: uint64(meta.BalVersion), + } + + // Extract header if requested + if flags.Has(types.BlockDataFlagHeader) && meta.HeaderLength > 0 { + start := int64(meta.headerOffset()) - dataStartOffset + end := start + int64(meta.HeaderLength) + if start >= 0 && end <= int64(len(data)) { + blockData.HeaderData = data[start:end] + } + } + + // Extract body if requested + if flags.Has(types.BlockDataFlagBody) && meta.BodyLength > 0 { + start := int64(meta.bodyOffset()) - dataStartOffset + end := start + int64(meta.BodyLength) + if start >= 0 && end <= int64(len(data)) { + bodyData := data[start:end] + if parseBlock != nil { + body, err := parseBlock(uint64(meta.BodyVersion), bodyData) + if err != nil { + return nil, fmt.Errorf("failed to parse body: %w", err) + } + blockData.Body = body + } else { + blockData.BodyData = bodyData + } + } + } + + // Extract payload if requested (v2+) + if flags.Has(types.BlockDataFlagPayload) && meta.PayloadLength > 0 && meta.ObjVersion >= 2 { + start := int64(meta.payloadOffset()) - dataStartOffset + end := start + int64(meta.PayloadLength) + if start >= 0 && end <= int64(len(data)) { + payloadData := data[start:end] + if parsePayload != nil { + payload, err := parsePayload(uint64(meta.PayloadVersion), payloadData) + if err != nil { + return nil, fmt.Errorf("failed to parse payload: %w", err) + } + blockData.Payload = payload + } else { + blockData.PayloadData = payloadData + } + } + } + + // Extract BAL if requested (v2+) + if flags.Has(types.BlockDataFlagBal) && meta.BalLength > 0 && meta.ObjVersion >= 2 { + start 
:= int64(meta.balOffset()) - dataStartOffset + end := start + int64(meta.BalLength) + if start >= 0 && end <= int64(len(data)) { + blockData.BalData = data[start:end] + } + } + + return blockData, nil +} + +// getBlockFull performs a full object read (fallback when range requests fail). +func (e *S3Engine) getBlockFull( + ctx context.Context, + key string, + flags types.BlockDataFlags, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { obj, err := e.client.GetObject(ctx, e.bucket, key, minio.GetObjectOptions{}) if err != nil { - if minio.ToErrorResponse(err).Code == "NoSuchKey" { + errResp := minio.ToErrorResponse(err) + if errResp.Code == "NoSuchKey" { return nil, nil } return nil, fmt.Errorf("failed to get object: %w", err) } defer obj.Close() - // read metadata - buf := make([]byte, 1024) - buflen, err := obj.Read(buf) - if (err != nil && err != io.EOF) || buflen == 0 { - return nil, fmt.Errorf("failed to read metadata: %w", err) + // Read entire object + data, err := io.ReadAll(obj) + if err != nil { + return nil, fmt.Errorf("failed to read object: %w", err) } - metadata, metadataLength, err := e.readObjectMetadata(buf) + // Parse metadata + meta, err := readObjectMetadata(data) if err != nil { return nil, fmt.Errorf("failed to read metadata: %w", err) } - headerData := make([]byte, metadata.headerLength) - headerOffset := 0 - if buflen > metadataLength { - copy(headerData, buf[metadataLength:buflen]) - headerOffset = buflen - metadataLength + blockData := &types.BlockData{ + HeaderVersion: uint64(meta.ObjVersion), + BodyVersion: uint64(meta.BodyVersion), + PayloadVersion: uint64(meta.PayloadVersion), + BalVersion: uint64(meta.BalVersion), } - if buflen < int(metadataLength)+int(metadata.headerLength) { - _, err = obj.Read(headerData[headerOffset:]) - if err != nil { - return nil, fmt.Errorf("failed to read header data: %w", err) - } - } + metaSize := meta.metadataSize() - bodyData := 
make([]byte, metadata.bodyLength) - bodyOffset := 0 - if buflen > int(metadataLength)+int(metadata.headerLength) { - copy(bodyData, buf[int(metadataLength)+int(metadata.headerLength):buflen]) - bodyOffset = buflen - int(metadataLength) - int(metadata.headerLength) + // Extract header if requested + if flags.Has(types.BlockDataFlagHeader) && meta.HeaderLength > 0 { + headerEnd := metaSize + int(meta.HeaderLength) + if headerEnd <= len(data) { + blockData.HeaderData = data[metaSize:headerEnd] + } } - if buflen < int(metadataLength)+int(metadata.headerLength)+int(metadata.bodyLength) { - _, err = obj.Read(bodyData[bodyOffset:]) - if err != nil { - return nil, fmt.Errorf("failed to read body data: %w", err) + // Extract body if requested + if flags.Has(types.BlockDataFlagBody) && meta.BodyLength > 0 { + bodyStart := metaSize + int(meta.HeaderLength) + bodyEnd := bodyStart + int(meta.BodyLength) + if bodyEnd <= len(data) { + bodyData := data[bodyStart:bodyEnd] + if parseBlock != nil { + body, err := parseBlock(uint64(meta.BodyVersion), bodyData) + if err != nil { + return nil, fmt.Errorf("failed to parse body: %w", err) + } + blockData.Body = body + } else { + blockData.BodyData = bodyData + } } } - blockData := &types.BlockData{ - HeaderVersion: uint64(metadata.objVersion), - HeaderData: headerData, - BodyVersion: uint64(metadata.bodyVersion), + // Extract payload if requested (v2+) + if flags.Has(types.BlockDataFlagPayload) && meta.PayloadLength > 0 && meta.ObjVersion >= 2 { + payloadStart := metaSize + int(meta.HeaderLength) + int(meta.BodyLength) + payloadEnd := payloadStart + int(meta.PayloadLength) + if payloadEnd <= len(data) { + payloadData := data[payloadStart:payloadEnd] + if parsePayload != nil { + payload, err := parsePayload(uint64(meta.PayloadVersion), payloadData) + if err != nil { + return nil, fmt.Errorf("failed to parse payload: %w", err) + } + blockData.Payload = payload + } else { + blockData.PayloadData = payloadData + } + } } - if parseBlock != nil 
{ - body, err := parseBlock(uint64(metadata.bodyVersion), bodyData) - if err != nil { - return nil, fmt.Errorf("failed to parse body: %w", err) + // Extract BAL if requested (v3+) + if flags.Has(types.BlockDataFlagBal) && meta.BalLength > 0 && meta.ObjVersion >= 2 { + balStart := metaSize + int(meta.HeaderLength) + int(meta.BodyLength) + int(meta.PayloadLength) + balEnd := balStart + int(meta.BalLength) + if balEnd <= len(data) { + blockData.BalData = data[balStart:balEnd] } - - blockData.Body = body - } else { - blockData.BodyData = bodyData } return blockData, nil } -func (e *S3Engine) AddBlock(ctx context.Context, slot uint64, root []byte, dataCb func() (*types.BlockData, error)) (bool, error) { +// AddBlock stores block data. Returns (added, updated, error). +func (e *S3Engine) AddBlock( + ctx context.Context, + slot uint64, + root []byte, + dataCb func() (*types.BlockData, error), +) (bool, bool, error) { key := e.getObjectKey(root, slot) - // Check if object already exists - stat, err := e.client.StatObject(ctx, e.bucket, key, minio.StatObjectOptions{}) - if err == nil && stat.Size > 0 { - return false, nil + // Check what components already exist + existingMeta, err := e.readMetadata(ctx, key) + if err != nil && err.Error() != "failed to get object: NoSuchKey" { + // Ignore "not found" errors + existingFlags, _ := e.GetStoredComponents(ctx, slot, root) + if existingFlags == 0 { + existingMeta = nil + } } + // Get the new data blockData, err := dataCb() if err != nil { - return false, fmt.Errorf("failed to get block data: %w", err) + return false, false, fmt.Errorf("failed to get block data: %w", err) + } + + // Calculate what we already have + var existingFlags types.BlockDataFlags + if existingMeta != nil { + existingFlags = existingMeta.storedFlags() + } + + // Calculate what the new data provides + var newFlags types.BlockDataFlags + if len(blockData.HeaderData) > 0 { + newFlags |= types.BlockDataFlagHeader + } + if len(blockData.BodyData) > 0 { + 
newFlags |= types.BlockDataFlagBody + } + if blockData.PayloadVersion != 0 && len(blockData.PayloadData) > 0 { + newFlags |= types.BlockDataFlagPayload + } + if blockData.BalVersion != 0 && len(blockData.BalData) > 0 { + newFlags |= types.BlockDataFlagBal } - metadata := &objectMetadata{ - objVersion: uint32(blockData.HeaderVersion), - headerLength: uint32(len(blockData.HeaderData)), - bodyVersion: uint32(blockData.BodyVersion), - bodyLength: uint32(len(blockData.BodyData)), + // Check if we need to update (new data has more components) + needsUpdate := (newFlags &^ existingFlags) != 0 + isNew := existingFlags == 0 + + if !isNew && !needsUpdate { + // Already have all the data + return false, false, nil } - metadataBytes := e.writeObjectMetadata(metadata) - metadataLength := len(metadataBytes) + // If updating, merge with existing data + finalData := blockData + if !isNew && needsUpdate { + // Fetch existing data and merge + existingData, err := e.GetBlock(ctx, slot, root, types.BlockDataFlagAll, nil, nil) + if err == nil && existingData != nil { + finalData = mergeBlockData(existingData, blockData) + } + } - // Prepare data with header and body versions and lengths - data := make([]byte, metadataLength+int(metadata.headerLength)+int(metadata.bodyLength)) - copy(data[:metadataLength], metadataBytes) - copy(data[metadataLength:metadataLength+int(metadata.headerLength)], blockData.HeaderData) - copy(data[metadataLength+int(metadata.headerLength):], blockData.BodyData) + // Write object (v1 for pre-gloas, v2 for gloas+) + metaBytes := writeObjectMetadata(finalData) - // Upload object + // Calculate total size and build reader chain (avoids copying to concatenated buffer) + totalSize := int64(len(metaBytes) + len(finalData.HeaderData) + len(finalData.BodyData)) + readers := []io.Reader{ + bytes.NewReader(metaBytes), + bytes.NewReader(finalData.HeaderData), + bytes.NewReader(finalData.BodyData), + } + + if finalData.BodyVersion >= uint64(spec.DataVersionGloas) { + 
totalSize += int64(len(finalData.PayloadData) + len(finalData.BalData)) + readers = append(readers, + bytes.NewReader(finalData.PayloadData), + bytes.NewReader(finalData.BalData), + ) + } + + // Upload object using MultiReader to stream without extra buffer allocation _, err = e.client.PutObject( ctx, e.bucket, key, - bytes.NewReader(data), - int64(len(data)), + io.MultiReader(readers...), + totalSize, minio.PutObjectOptions{ContentType: "application/octet-stream"}, ) if err != nil { - return false, fmt.Errorf("failed to upload block: %w", err) + return false, false, fmt.Errorf("failed to upload block: %w", err) + } + + return isNew, !isNew && needsUpdate, nil +} + +// mergeBlockData merges existing data with new data (new takes precedence for non-empty fields). +func mergeBlockData(existing, new *types.BlockData) *types.BlockData { + result := &types.BlockData{} + + // Use new data if available, otherwise keep existing + if len(new.HeaderData) > 0 { + result.HeaderVersion = new.HeaderVersion + result.HeaderData = new.HeaderData + } else { + result.HeaderVersion = existing.HeaderVersion + result.HeaderData = existing.HeaderData + } + + if len(new.BodyData) > 0 { + result.BodyVersion = new.BodyVersion + result.BodyData = new.BodyData + } else { + result.BodyVersion = existing.BodyVersion + result.BodyData = existing.BodyData + } + + if new.PayloadVersion != 0 && len(new.PayloadData) > 0 { + result.PayloadVersion = new.PayloadVersion + result.PayloadData = new.PayloadData + } else { + result.PayloadVersion = existing.PayloadVersion + result.PayloadData = existing.PayloadData + } + + if new.BalVersion != 0 && len(new.BalData) > 0 { + result.BalVersion = new.BalVersion + result.BalData = new.BalData + } else { + result.BalVersion = existing.BalVersion + result.BalData = existing.BalData } - return true, nil + return result } diff --git a/blockdb/tiered/tiered.go b/blockdb/tiered/tiered.go new file mode 100644 index 000000000..04f05a16f --- /dev/null +++ 
b/blockdb/tiered/tiered.go @@ -0,0 +1,278 @@ +package tiered + +import ( + "context" + "fmt" + + "github.com/sirupsen/logrus" + + "github.com/ethpandaops/dora/blockdb/pebble" + "github.com/ethpandaops/dora/blockdb/s3" + "github.com/ethpandaops/dora/blockdb/types" + dtypes "github.com/ethpandaops/dora/types" +) + +// TieredEngine combines Pebble (cache) and S3 (primary storage) in a tiered architecture. +// Reads check cache first, then fall back to S3. +// Writes go to both (write-through). +type TieredEngine struct { + cache *pebble.PebbleEngine + primary *s3.S3Engine + cleanup *pebble.CacheCleanup + logger logrus.FieldLogger +} + +// NewTieredEngine creates a new tiered storage engine. +func NewTieredEngine(config dtypes.TieredBlockDBConfig, logger logrus.FieldLogger) (types.BlockDbEngine, error) { + // Initialize Pebble cache + cacheEngine, err := pebble.NewPebbleEngine(config.Pebble) + if err != nil { + return nil, fmt.Errorf("failed to initialize pebble cache: %w", err) + } + + pebbleEngine, ok := cacheEngine.(*pebble.PebbleEngine) + if !ok { + return nil, fmt.Errorf("unexpected pebble engine type") + } + + // Initialize S3 primary storage + primaryEngine, err := s3.NewS3Engine(config.S3) + if err != nil { + cacheEngine.Close() + return nil, fmt.Errorf("failed to initialize s3 primary storage: %w", err) + } + + s3Engine, ok := primaryEngine.(*s3.S3Engine) + if !ok { + cacheEngine.Close() + return nil, fmt.Errorf("unexpected s3 engine type") + } + + // Initialize cache cleanup + cleanup := pebble.NewCacheCleanup(pebbleEngine, logger) + cleanup.Start() + + return &TieredEngine{ + cache: pebbleEngine, + primary: s3Engine, + cleanup: cleanup, + logger: logger.WithField("component", "tiered-blockdb"), + }, nil +} + +// Close closes both storage engines. 
+func (e *TieredEngine) Close() error { + if e.cleanup != nil { + e.cleanup.Stop() + } + + var errs []error + if err := e.cache.Close(); err != nil { + errs = append(errs, fmt.Errorf("cache close: %w", err)) + } + if err := e.primary.Close(); err != nil { + errs = append(errs, fmt.Errorf("primary close: %w", err)) + } + + if len(errs) > 0 { + return errs[0] + } + return nil +} + +// GetStoredComponents returns which components exist for a block. +// Checks cache first, then S3. +func (e *TieredEngine) GetStoredComponents(ctx context.Context, slot uint64, root []byte) (types.BlockDataFlags, error) { + // Check cache first + cacheFlags, err := e.cache.GetStoredComponents(ctx, slot, root) + if err != nil { + e.logger.Debugf("cache GetStoredComponents error: %v", err) + } + + // If cache has all components, return early + if cacheFlags == types.BlockDataFlagAll { + return cacheFlags, nil + } + + // Check S3 for additional components + s3Flags, err := e.primary.GetStoredComponents(ctx, slot, root) + if err != nil { + return cacheFlags, nil // Return cache result on S3 error + } + + return cacheFlags | s3Flags, nil +} + +// GetBlock retrieves block data with selective loading. +// Checks cache first, fetches missing components from S3. 
+func (e *TieredEngine) GetBlock( + ctx context.Context, + slot uint64, + root []byte, + flags types.BlockDataFlags, + parseBlock func(uint64, []byte) (any, error), + parsePayload func(uint64, []byte) (any, error), +) (*types.BlockData, error) { + // Check what's in cache + cacheFlags, _ := e.cache.GetStoredComponents(ctx, slot, root) + + // Determine what we can get from cache vs S3 + cacheRequestFlags := flags & cacheFlags + s3RequestFlags := flags &^ cacheFlags + + result := &types.BlockData{} + + // Get from cache + if cacheRequestFlags != 0 { + cacheData, err := e.cache.GetBlock(ctx, slot, root, cacheRequestFlags, parseBlock, parsePayload) + if err != nil { + e.logger.Debugf("cache GetBlock error: %v", err) + } else if cacheData != nil { + mergeBlockDataInto(result, cacheData) + + // Record LRU access + if e.cleanup != nil { + e.cleanup.RecordAccess(root, cacheRequestFlags) + } + } + } + + // Get missing components from S3 + if s3RequestFlags != 0 { + s3Data, err := e.primary.GetBlock(ctx, slot, root, s3RequestFlags, parseBlock, parsePayload) + if err != nil { + e.logger.Debugf("s3 GetBlock error: %v", err) + } else if s3Data != nil { + mergeBlockDataInto(result, s3Data) + + // Cache the S3 data for future reads + e.cacheS3Data(ctx, slot, root, s3Data, s3RequestFlags) + } + } + + return result, nil +} + +// cacheS3Data stores S3 data in the cache for future reads. 
+func (e *TieredEngine) cacheS3Data(ctx context.Context, slot uint64, root []byte, data *types.BlockData, flags types.BlockDataFlags) { + // Build cache data with only the components we fetched from S3 + cacheData := &types.BlockData{} + + if flags.Has(types.BlockDataFlagHeader) && len(data.HeaderData) > 0 { + cacheData.HeaderVersion = data.HeaderVersion + cacheData.HeaderData = data.HeaderData + } + if flags.Has(types.BlockDataFlagBody) && len(data.BodyData) > 0 { + cacheData.BodyVersion = data.BodyVersion + cacheData.BodyData = data.BodyData + } + if flags.Has(types.BlockDataFlagPayload) && len(data.PayloadData) > 0 { + cacheData.PayloadVersion = data.PayloadVersion + cacheData.PayloadData = data.PayloadData + } + if flags.Has(types.BlockDataFlagBal) && len(data.BalData) > 0 { + cacheData.BalVersion = data.BalVersion + cacheData.BalData = data.BalData + } + + // Add to cache (ignore errors - caching is best effort) + _, _, err := e.cache.AddBlock(ctx, slot, root, func() (*types.BlockData, error) { + return cacheData, nil + }) + if err != nil { + e.logger.Debugf("failed to cache S3 data: %v", err) + } + + // Flush LRU updates since we did a write + if e.cleanup != nil { + e.cleanup.FlushLRU() + } +} + +// AddBlock stores block data using write-through to both cache and S3. +// Returns (added, updated, error). 
+func (e *TieredEngine) AddBlock( + ctx context.Context, + slot uint64, + root []byte, + dataCb func() (*types.BlockData, error), +) (bool, bool, error) { + // Get the data once + data, err := dataCb() + if err != nil { + return false, false, err + } + + // Check what components already exist (in cache or S3) + existingFlags, _ := e.GetStoredComponents(ctx, slot, root) + + // Determine what new data provides + var newFlags types.BlockDataFlags + if len(data.HeaderData) > 0 { + newFlags |= types.BlockDataFlagHeader + } + if len(data.BodyData) > 0 { + newFlags |= types.BlockDataFlagBody + } + if data.PayloadVersion != 0 && len(data.PayloadData) > 0 { + newFlags |= types.BlockDataFlagPayload + } + if data.BalVersion != 0 && len(data.BalData) > 0 { + newFlags |= types.BlockDataFlagBal + } + + // Check if we need to update + needsUpdate := (newFlags &^ existingFlags) != 0 + isNew := existingFlags == 0 + + if !isNew && !needsUpdate { + return false, false, nil + } + + // Write-through: write to S3 first (primary), then cache + // S3 handles merging with existing data + s3Added, s3Updated, err := e.primary.AddBlock(ctx, slot, root, func() (*types.BlockData, error) { + return data, nil + }) + if err != nil { + return false, false, fmt.Errorf("failed to write to S3: %w", err) + } + + // Write to cache + _, _, err = e.cache.AddBlock(ctx, slot, root, func() (*types.BlockData, error) { + return data, nil + }) + if err != nil { + e.logger.Warnf("failed to write to cache: %v", err) + // Don't fail - S3 write succeeded + } + + // Flush LRU updates after write + if e.cleanup != nil { + e.cleanup.FlushLRU() + } + + return s3Added, s3Updated, nil +} + +// mergeBlockDataInto merges source data into target (source values take precedence for non-empty fields). 
+func mergeBlockDataInto(target, source *types.BlockData) { + if source.HeaderVersion != 0 || len(source.HeaderData) > 0 { + target.HeaderVersion = source.HeaderVersion + target.HeaderData = source.HeaderData + } + if source.BodyVersion != 0 || len(source.BodyData) > 0 { + target.BodyVersion = source.BodyVersion + target.BodyData = source.BodyData + target.Body = source.Body + } + if source.PayloadVersion != 0 || len(source.PayloadData) > 0 { + target.PayloadVersion = source.PayloadVersion + target.PayloadData = source.PayloadData + target.Payload = source.Payload + } + if source.BalVersion != 0 || len(source.BalData) > 0 { + target.BalVersion = source.BalVersion + target.BalData = source.BalData + } +} diff --git a/blockdb/types/engine.go b/blockdb/types/engine.go index 80db81f67..86b352ddf 100644 --- a/blockdb/types/engine.go +++ b/blockdb/types/engine.go @@ -2,13 +2,25 @@ package types import "context" -// BlockData holds beacon block header and body data. +// BlockData contains all data components for a block. type BlockData struct { + // Header data HeaderVersion uint64 HeaderData []byte - BodyVersion uint64 - BodyData []byte - Body interface{} + + // Body data + BodyVersion uint64 + BodyData []byte + Body any // Parsed body (optional) + + // Execution payload data (ePBS) + PayloadVersion uint64 + PayloadData []byte + Payload any // Parsed payload (optional) + + // Block access list data + BalVersion uint64 + BalData []byte } // ExecDataTxSections holds all compressed section data for a single @@ -22,11 +34,35 @@ type ExecDataTxSections struct { StateChangeData []byte // snappy-compressed, nil if section not present } -// BlockDbEngine is the interface for beacon block storage. +// BlockDbEngine defines the interface for block database engines. type BlockDbEngine interface { + // Close closes the database engine. 
// BlockDataFlags is a bit set selecting which block components to load from
// storage.
type BlockDataFlags uint8

const (
	// BlockDataFlagHeader requests the block header data.
	BlockDataFlagHeader BlockDataFlags = 1 << iota // 0x01
	// BlockDataFlagBody requests the block body data.
	BlockDataFlagBody // 0x02
	// BlockDataFlagPayload requests the execution payload data.
	BlockDataFlagPayload // 0x04
	// BlockDataFlagBal requests the block access list data.
	BlockDataFlagBal // 0x08

	// BlockDataFlagAll requests all block components.
	BlockDataFlagAll = BlockDataFlagHeader | BlockDataFlagBody | BlockDataFlagPayload | BlockDataFlagBal
)

// Has reports whether every bit of flag is set in f.
func (f BlockDataFlags) Has(flag BlockDataFlags) bool {
	return f&flag == flag
}

// HasAny reports whether at least one bit of flags is set in f.
func (f BlockDataFlags) HasAny(flags BlockDataFlags) bool {
	return f&flags != 0
}

// Add returns f with flag set.
func (f BlockDataFlags) Add(flag BlockDataFlags) BlockDataFlags {
	return f | flag
}

// Remove returns f with flag cleared.
func (f BlockDataFlags) Remove(flag BlockDataFlags) BlockDataFlags {
	return f &^ flag
}
type ChainSpecPreset struct { FieldElementsPerExtBlob uint64 `yaml:"FIELD_ELEMENTS_PER_EXT_BLOB" check-if-fork:"FuluForkEpoch"` CellsPerExtBlob uint64 `yaml:"CELLS_PER_EXT_BLOB" check-if-fork:"FuluForkEpoch"` NumberOfColumns *uint64 `yaml:"NUMBER_OF_COLUMNS" check-if-fork:"FuluForkEpoch"` + + // Gloas + PtcSize uint64 `yaml:"PTC_SIZE" check-if-fork:"GloasForkEpoch"` + MaxPayloadAttestations uint64 `yaml:"MAX_PAYLOAD_ATTESTATIONS" check-if-fork:"GloasForkEpoch"` + BuilderRegistryLimit uint64 `yaml:"BUILDER_REGISTRY_LIMIT" check-if-fork:"GloasForkEpoch"` + BuilderPendingWithdrawalsLimit uint64 `yaml:"BUILDER_PENDING_WITHDRAWALS_LIMIT" check-if-fork:"GloasForkEpoch"` + MaxBuildersPerWithdrawalsSweep uint64 `yaml:"MAX_BUILDERS_PER_WITHDRAWALS_SWEEP" check-if-fork:"GloasForkEpoch"` } type ChainSpecDomainTypes struct { @@ -219,6 +231,9 @@ type ChainSpecDomainTypes struct { DomainSyncCommitteeSelectionProof phase0.DomainType `yaml:"DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF"` DomainContributionAndProof phase0.DomainType `yaml:"DOMAIN_CONTRIBUTION_AND_PROOF"` DomainBlsToExecutionChange phase0.DomainType `yaml:"DOMAIN_BLS_TO_EXECUTION_CHANGE"` + DomainBeaconBuilder phase0.DomainType `yaml:"DOMAIN_BEACON_BUILDER" check-if-fork:"GloasForkEpoch"` + DomainPtcAttester phase0.DomainType `yaml:"DOMAIN_PTC_ATTESTER" check-if-fork:"GloasForkEpoch"` + DomainProposerPreferences phase0.DomainType `yaml:"DOMAIN_PROPOSER_PREFERENCES" check-if-fork:"GloasForkEpoch"` } type ChainSpec struct { diff --git a/clients/consensus/chainstate.go b/clients/consensus/chainstate.go index f96a59e72..cfa3d08ef 100644 --- a/clients/consensus/chainstate.go +++ b/clients/consensus/chainstate.go @@ -361,6 +361,34 @@ func (cs *ChainState) GetForkDigestForEpoch(epoch phase0.Epoch) phase0.ForkDiges return cs.GetForkDigest(currentForkVersion, currentBlobParams) } +func (cs *ChainState) GetBlobScheduleForEpoch(epoch phase0.Epoch) *BlobScheduleEntry { + if cs.specs == nil { + return nil + } + + var blobSchedule 
*BlobScheduleEntry + + if cs.specs.ElectraForkEpoch != nil && epoch >= phase0.Epoch(*cs.specs.ElectraForkEpoch) { + blobSchedule = &BlobScheduleEntry{ + Epoch: *cs.specs.ElectraForkEpoch, + MaxBlobsPerBlock: cs.specs.MaxBlobsPerBlockElectra, + } + } else if cs.specs.DenebForkEpoch != nil && epoch >= phase0.Epoch(*cs.specs.DenebForkEpoch) { + blobSchedule = &BlobScheduleEntry{ + Epoch: *cs.specs.DenebForkEpoch, + MaxBlobsPerBlock: cs.specs.MaxBlobsPerBlock, + } + } + + for i, blobScheduleEntry := range cs.specs.BlobSchedule { + if blobScheduleEntry.Epoch <= uint64(epoch) { + blobSchedule = &cs.specs.BlobSchedule[i] + } + } + + return blobSchedule +} + func (cs *ChainState) GetForkDigest(forkVersion phase0.Version, blobParams *BlobScheduleEntry) phase0.ForkDigest { if cs.specs == nil || cs.genesis == nil { return phase0.ForkDigest{} @@ -444,6 +472,22 @@ func (cs *ChainState) GetValidatorChurnLimit(validatorCount uint64) uint64 { return adaptable } +func (cs *ChainState) IsEip7732Enabled(epoch phase0.Epoch) bool { + if cs.specs == nil { + return false + } + + return cs.specs.GloasForkEpoch != nil && phase0.Epoch(*cs.specs.GloasForkEpoch) <= epoch +} + +func (cs *ChainState) IsFuluEnabled(epoch phase0.Epoch) bool { + if cs.specs == nil { + return false + } + + return cs.specs.FuluForkEpoch != nil && phase0.Epoch(*cs.specs.FuluForkEpoch) <= epoch +} + func (cs *ChainState) GetBalanceChurnLimit(totalActiveBalance uint64) uint64 { if cs.specs == nil { return 0 diff --git a/clients/consensus/client.go b/clients/consensus/client.go index 234c48eeb..ad514acdf 100644 --- a/clients/consensus/client.go +++ b/clients/consensus/client.go @@ -6,6 +6,7 @@ import ( "time" v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" @@ -23,36 +24,38 @@ type ClientConfig struct { } type Client struct { - pool *Pool - clientIdx uint16 - endpointConfig *ClientConfig 
- clientCtx context.Context - clientCtxCancel context.CancelFunc - rpcClient *rpc.BeaconClient - logger *logrus.Entry - isOnline bool - isSyncing bool - isOptimistic bool - versionStr string - nodeIdentity *rpc.NodeIdentity - clientType ClientType - lastEvent time.Time - retryCounter uint64 - lastError error - headMutex sync.RWMutex - headRoot phase0.Root - headSlot phase0.Slot - justifiedRoot phase0.Root - justifiedEpoch phase0.Epoch - finalizedRoot phase0.Root - finalizedEpoch phase0.Epoch - lastFinalityUpdateEpoch phase0.Epoch - lastMetadataUpdateEpoch phase0.Epoch - lastMetadataUpdateTime time.Time - lastSyncUpdateEpoch phase0.Epoch - peers []*v1.Peer - streamDispatcher utils.Dispatcher[*rpc.BeaconStreamEvent] - checkpointDispatcher utils.Dispatcher[*v1.Finality] + pool *Pool + clientIdx uint16 + endpointConfig *ClientConfig + clientCtx context.Context + clientCtxCancel context.CancelFunc + rpcClient *rpc.BeaconClient + logger *logrus.Entry + isOnline bool + isSyncing bool + isOptimistic bool + versionStr string + nodeIdentity *rpc.NodeIdentity + clientType ClientType + lastEvent time.Time + retryCounter uint64 + lastError error + headMutex sync.RWMutex + headRoot phase0.Root + headSlot phase0.Slot + justifiedRoot phase0.Root + justifiedEpoch phase0.Epoch + finalizedRoot phase0.Root + finalizedEpoch phase0.Epoch + lastFinalityUpdateEpoch phase0.Epoch + lastMetadataUpdateEpoch phase0.Epoch + lastMetadataUpdateTime time.Time + lastSyncUpdateEpoch phase0.Epoch + peers []*v1.Peer + streamDispatcher utils.Dispatcher[*rpc.BeaconStreamEvent] + checkpointDispatcher utils.Dispatcher[*v1.Finality] + executionPayloadDispatcher utils.Dispatcher[*v1.ExecutionPayloadAvailableEvent] + executionPayloadBidDispatcher utils.Dispatcher[*gloas.SignedExecutionPayloadBid] specWarnings []string // warnings from incomplete spec checks specs map[string]interface{} @@ -99,6 +102,14 @@ func (client *Client) SubscribeFinalizedEvent(capacity int) *utils.Subscription[ return 
client.checkpointDispatcher.Subscribe(capacity, false) } +func (client *Client) SubscribeExecutionPayloadAvailableEvent(capacity int, blocking bool) *utils.Subscription[*v1.ExecutionPayloadAvailableEvent] { + return client.executionPayloadDispatcher.Subscribe(capacity, blocking) +} + +func (client *Client) SubscribeExecutionPayloadBidEvent(capacity int, blocking bool) *utils.Subscription[*gloas.SignedExecutionPayloadBid] { + return client.executionPayloadBidDispatcher.Subscribe(capacity, blocking) +} + func (client *Client) GetPool() *Pool { return client.pool } diff --git a/clients/consensus/clientlogic.go b/clients/consensus/clientlogic.go index 146af4072..84f63bb83 100644 --- a/clients/consensus/clientlogic.go +++ b/clients/consensus/clientlogic.go @@ -8,6 +8,7 @@ import ( "time" v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/sirupsen/logrus" @@ -133,7 +134,11 @@ func (client *Client) runClientLogic() error { } // start event stream - blockStream := client.rpcClient.NewBlockStream(client.clientCtx, client.logger, rpc.StreamBlockEvent|rpc.StreamHeadEvent|rpc.StreamFinalizedEvent) + blockStream := client.rpcClient.NewBlockStream( + client.clientCtx, + client.logger, + rpc.StreamBlockEvent|rpc.StreamHeadEvent|rpc.StreamFinalizedEvent|rpc.StreamExecutionPayloadEvent|rpc.StreamExecutionPayloadBidEvent, + ) defer blockStream.Close() // process events @@ -165,6 +170,12 @@ func (client *Client) runClientLogic() error { if err != nil { client.logger.Warnf("failed processing finalized event: %v", err) } + + case rpc.StreamExecutionPayloadEvent: + client.executionPayloadDispatcher.Fire(evt.Data.(*v1.ExecutionPayloadAvailableEvent)) + + case rpc.StreamExecutionPayloadBidEvent: + client.executionPayloadBidDispatcher.Fire(evt.Data.(*gloas.SignedExecutionPayloadBid)) } // fire through stream dispatcher first to preserve SSE ordering diff --git 
a/clients/consensus/rpc/beaconapi.go b/clients/consensus/rpc/beaconapi.go index 6768091bc..7435764e1 100644 --- a/clients/consensus/rpc/beaconapi.go +++ b/clients/consensus/rpc/beaconapi.go @@ -19,6 +19,7 @@ import ( "github.com/attestantio/go-eth2-client/spec" "github.com/attestantio/go-eth2-client/spec/capella" "github.com/attestantio/go-eth2-client/spec/deneb" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/rs/zerolog" "github.com/sirupsen/logrus" @@ -406,6 +407,22 @@ func (bc *BeaconClient) GetBlockBodyByBlockroot(ctx context.Context, blockroot p return result.Data, nil } +func (bc *BeaconClient) GetExecutionPayloadByBlockroot(ctx context.Context, blockroot phase0.Root) (*gloas.SignedExecutionPayloadEnvelope, error) { + provider, isProvider := bc.clientSvc.(eth2client.ExecutionPayloadProvider) + if !isProvider { + return nil, fmt.Errorf("get execution payload not supported") + } + + result, err := provider.SignedExecutionPayloadEnvelope(ctx, &api.SignedExecutionPayloadEnvelopeOpts{ + Block: fmt.Sprintf("0x%x", blockroot), + }) + if err != nil { + return nil, err + } + + return result.Data, nil +} + func (bc *BeaconClient) GetState(ctx context.Context, stateRef string) (*spec.VersionedBeaconState, error) { provider, isProvider := bc.clientSvc.(eth2client.BeaconStateProvider) if !isProvider { diff --git a/clients/consensus/rpc/beaconstream.go b/clients/consensus/rpc/beaconstream.go index be6fd92c9..9f699ac49 100644 --- a/clients/consensus/rpc/beaconstream.go +++ b/clients/consensus/rpc/beaconstream.go @@ -10,16 +10,18 @@ import ( "time" v1 "github.com/attestantio/go-eth2-client/api/v1" - "github.com/donovanhide/eventsource" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/sirupsen/logrus" "github.com/ethpandaops/dora/clients/consensus/rpc/eventstream" ) const ( - StreamBlockEvent uint16 = 0x01 - StreamHeadEvent uint16 = 0x02 - StreamFinalizedEvent uint16 = 0x04 + StreamBlockEvent 
uint16 = 0x01 + StreamHeadEvent uint16 = 0x02 + StreamFinalizedEvent uint16 = 0x04 + StreamExecutionPayloadEvent uint16 = 0x08 + StreamExecutionPayloadBidEvent uint16 = 0x10 ) type BeaconStreamEvent struct { @@ -71,48 +73,126 @@ func (bs *BeaconStream) startStream() { bs.running = false }() - stream := bs.subscribeStream(bs.client.endpoint, bs.events) - if stream != nil { - defer stream.Close() + // Subscribe to basic events (block, head, finalized_checkpoint) + basicEvents := bs.events & (StreamBlockEvent | StreamHeadEvent | StreamFinalizedEvent) + basicStream := bs.subscribeStream(bs.client.endpoint, basicEvents) + if basicStream != nil { + defer basicStream.Close() + } - for { + // Subscribe to advanced events (execution_payload_available, execution_payload_bid) + // These are in a separate stream because clients may not support them yet, + // and subscribing to unsupported topics can cause the entire subscription to fail. + // Run in a separate goroutine so it doesn't block the basic stream. 
+ advancedEvents := bs.events & (StreamExecutionPayloadEvent | StreamExecutionPayloadBidEvent) + advancedStreamChan := make(chan *eventstream.Stream, 1) + if advancedEvents > 0 { + go func() { + stream := bs.subscribeStream(bs.client.endpoint, advancedEvents) select { + case advancedStreamChan <- stream: case <-bs.ctx.Done(): - return - case evt := <-stream.Events: - switch evt.Event() { - case "block": - bs.processBlockEvent(evt) - case "head": - bs.processHeadEvent(evt) - case "finalized_checkpoint": - bs.processFinalizedEvent(evt) - } - case <-stream.Ready: - bs.ReadyChan <- &BeaconStreamStatus{ - Ready: true, - } - case err := <-stream.Errors: - if strings.Contains(err.Error(), "INTERNAL_ERROR; received from peer") { - // this seems to be a go bug, silently reconnect to the stream - time.Sleep(10 * time.Millisecond) - stream.RetryNow() - } else { - bs.logger.Warnf("beacon block stream error: %v", err) + if stream != nil { + stream.Close() } + } + }() + } - select { - case bs.ReadyChan <- &BeaconStreamStatus{ - Ready: false, - Error: err, - }: - case <-bs.ctx.Done(): - } + var advancedStream *eventstream.Stream + defer func() { + if advancedStream != nil { + advancedStream.Close() + } + }() + + for { + select { + case <-bs.ctx.Done(): + return + + // Basic stream events + case evt := <-basicStream.Events: + switch evt.Event() { + case "block": + bs.processBlockEvent(evt) + case "head": + bs.processHeadEvent(evt) + case "finalized_checkpoint": + bs.processFinalizedEvent(evt) + } + case <-basicStream.Ready: + bs.ReadyChan <- &BeaconStreamStatus{ + Ready: true, } + case err := <-basicStream.Errors: + bs.handleStreamError(basicStream, err) + + // Advanced stream connection established + case stream := <-advancedStreamChan: + advancedStream = stream + + // Advanced stream events (no Ready/Error forwarding) + case evt := <-bs.getAdvancedStreamEvents(advancedStream): + switch evt.Event() { + case "execution_payload_available": + 
bs.processExecutionPayloadAvailableEvent(evt) + case "execution_payload_bid": + bs.processExecutionPayloadBidEvent(evt) + } + case <-bs.getAdvancedStreamReady(advancedStream): + // Don't forward ready events from advanced stream + case <-bs.getAdvancedStreamErrors(advancedStream): + // Silently retry - clients may not support these events yet + time.Sleep(10 * time.Millisecond) + advancedStream.RetryNow() } } } +// getAdvancedStreamEvents returns the events channel or a nil channel if stream is nil. +func (bs *BeaconStream) getAdvancedStreamEvents(stream *eventstream.Stream) chan eventstream.StreamEvent { + if stream == nil { + return nil + } + return stream.Events +} + +// getAdvancedStreamReady returns the ready channel or a nil channel if stream is nil. +func (bs *BeaconStream) getAdvancedStreamReady(stream *eventstream.Stream) chan bool { + if stream == nil { + return nil + } + return stream.Ready +} + +// getAdvancedStreamErrors returns the errors channel or a nil channel if stream is nil. +func (bs *BeaconStream) getAdvancedStreamErrors(stream *eventstream.Stream) chan error { + if stream == nil { + return nil + } + return stream.Errors +} + +// handleStreamError handles stream errors and forwards them to the ReadyChan. 
+func (bs *BeaconStream) handleStreamError(stream *eventstream.Stream, err error) { + if strings.Contains(err.Error(), "INTERNAL_ERROR; received from peer") { + // this seems to be a go bug, silently reconnect to the stream + time.Sleep(10 * time.Millisecond) + stream.RetryNow() + } else { + bs.logger.Warnf("beacon block stream error: %v", err) + } + + select { + case bs.ReadyChan <- &BeaconStreamStatus{ + Ready: false, + Error: err, + }: + case <-bs.ctx.Done(): + } +} + func (bs *BeaconStream) subscribeStream(endpoint string, events uint16) *eventstream.Stream { var topics strings.Builder @@ -148,6 +228,26 @@ func (bs *BeaconStream) subscribeStream(endpoint string, events uint16) *eventst topicsCount++ } + if events&StreamExecutionPayloadEvent > 0 { + if topicsCount > 0 { + fmt.Fprintf(&topics, ",") + } + + fmt.Fprintf(&topics, "execution_payload_available") + + topicsCount++ + } + + if events&StreamExecutionPayloadBidEvent > 0 { + if topicsCount > 0 { + fmt.Fprintf(&topics, ",") + } + + fmt.Fprintf(&topics, "execution_payload_bid") + + topicsCount++ + } + if topicsCount == 0 { return nil } @@ -179,7 +279,7 @@ func (bs *BeaconStream) subscribeStream(endpoint string, events uint16) *eventst } } -func (bs *BeaconStream) processBlockEvent(evt eventsource.Event) { +func (bs *BeaconStream) processBlockEvent(evt eventstream.StreamEvent) { var parsed v1.BlockEvent err := json.Unmarshal([]byte(evt.Data()), &parsed) @@ -194,7 +294,7 @@ func (bs *BeaconStream) processBlockEvent(evt eventsource.Event) { } } -func (bs *BeaconStream) processHeadEvent(evt eventsource.Event) { +func (bs *BeaconStream) processHeadEvent(evt eventstream.StreamEvent) { var parsed v1.HeadEvent err := json.Unmarshal([]byte(evt.Data()), &parsed) @@ -210,7 +310,7 @@ func (bs *BeaconStream) processHeadEvent(evt eventsource.Event) { } } -func (bs *BeaconStream) processFinalizedEvent(evt eventsource.Event) { +func (bs *BeaconStream) processFinalizedEvent(evt eventstream.StreamEvent) { var parsed 
v1.FinalizedCheckpointEvent err := json.Unmarshal([]byte(evt.Data()), &parsed) @@ -225,6 +325,36 @@ func (bs *BeaconStream) processFinalizedEvent(evt eventsource.Event) { } } +func (bs *BeaconStream) processExecutionPayloadAvailableEvent(evt eventstream.StreamEvent) { + var parsed v1.ExecutionPayloadAvailableEvent + + err := json.Unmarshal([]byte(evt.Data()), &parsed) + if err != nil { + bs.logger.Warnf("beacon block stream failed to decode execution_payload event: %v", err) + return + } + + bs.EventChan <- &BeaconStreamEvent{ + Event: StreamExecutionPayloadEvent, + Data: &parsed, + } +} + +func (bs *BeaconStream) processExecutionPayloadBidEvent(evt eventstream.StreamEvent) { + var parsed gloas.SignedExecutionPayloadBid + + err := json.Unmarshal([]byte(evt.Data()), &parsed) + if err != nil { + bs.logger.Warnf("beacon block stream failed to decode execution_payload_bid event: %v", err) + return + } + + bs.EventChan <- &BeaconStreamEvent{ + Event: StreamExecutionPayloadBidEvent, + Data: &parsed, + } +} + func getRedactedURL(requrl string) string { var logurl string diff --git a/cmd/dora-explorer/main.go b/cmd/dora-explorer/main.go index e32f1d0c5..48c6ab8f5 100644 --- a/cmd/dora-explorer/main.go +++ b/cmd/dora-explorer/main.go @@ -233,6 +233,8 @@ func startFrontend(router *mux.Router) { router.HandleFunc("/validators/submit_withdrawals", handlers.SubmitWithdrawal).Methods("GET") router.HandleFunc("/validator/{idxOrPubKey}", handlers.Validator).Methods("GET") router.HandleFunc("/validator/{index}/slots", handlers.ValidatorSlots).Methods("GET") + router.HandleFunc("/builders", handlers.Builders).Methods("GET") + router.HandleFunc("/builder/{idxOrPubKey}", handlers.BuilderDetail).Methods("GET") if utils.Config.Frontend.Pprof { // add pprof handler diff --git a/cmd/dora-utils/blockdb_sync.go b/cmd/dora-utils/blockdb_sync.go index fdebcb81a..7905d7e9f 100644 --- a/cmd/dora-utils/blockdb_sync.go +++ b/cmd/dora-utils/blockdb_sync.go @@ -271,7 +271,7 @@ func processSlot(ctx 
context.Context, pool *consensus.Pool, dynSsz *dynssz.DynSs return slotResult{slot: slot, err: fmt.Errorf("failed to marshal block header for slot %d: %v", slot, err), time: time.Since(t1)} } - added, err := blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, slot, blockHeader.Root[:], func() (*btypes.BlockData, error) { + added, _, err := blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, slot, blockHeader.Root[:], func() (*btypes.BlockData, error) { blockBody, err := client.GetRPCClient().GetBlockBodyByBlockroot(ctx, blockHeader.Root) if err != nil { return nil, fmt.Errorf("failed to get block body for slot %d: %v", slot, err) @@ -282,11 +282,29 @@ func processSlot(ctx context.Context, pool *consensus.Pool, dynSsz *dynssz.DynSs return nil, fmt.Errorf("failed to marshal block body for slot %d: %v", slot, err) } + var payloadVersion uint64 + var payloadBytes []byte + + chainState := pool.GetChainState() + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(phase0.Slot(slot))) { + blockPayload, err := client.GetRPCClient().GetExecutionPayloadByBlockroot(ctx, blockHeader.Root) + if err != nil { + return nil, fmt.Errorf("failed to get block execution payload for slot %d: %v", slot, err) + } + + payloadVersion, payloadBytes, err = beacon.MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(dynSsz, blockPayload, true) + if err != nil { + return nil, fmt.Errorf("failed to marshal block execution payload for slot %d: %v", slot, err) + } + } + return &btypes.BlockData{ - HeaderVersion: 1, - HeaderData: headerBytes, - BodyVersion: version, - BodyData: bodyBytes, + HeaderVersion: 1, + HeaderData: headerBytes, + BodyVersion: version, + BodyData: bodyBytes, + PayloadVersion: payloadVersion, + PayloadData: payloadBytes, }, nil }) if err != nil { diff --git a/db/block_bids.go b/db/block_bids.go new file mode 100644 index 000000000..7b2b9c6ca --- /dev/null +++ b/db/block_bids.go @@ -0,0 +1,193 @@ +package db + +import ( + "context" + "fmt" + "strings" + + 
"github.com/ethpandaops/dora/dbtypes" + "github.com/jmoiron/sqlx" +) + +func InsertBids(bids []*dbtypes.BlockBid, tx *sqlx.Tx) error { + var sql strings.Builder + fmt.Fprint(&sql, + EngineQuery(map[dbtypes.DBEngineType]string{ + dbtypes.DBEnginePgsql: "INSERT INTO block_bids ", + dbtypes.DBEngineSqlite: "INSERT OR REPLACE INTO block_bids ", + }), + "(parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment)", + " VALUES ", + ) + argIdx := 0 + fieldCount := 9 + + args := make([]any, len(bids)*fieldCount) + for i, bid := range bids { + if i > 0 { + fmt.Fprintf(&sql, ", ") + } + fmt.Fprintf(&sql, "(") + for f := 0; f < fieldCount; f++ { + if f > 0 { + fmt.Fprintf(&sql, ", ") + } + fmt.Fprintf(&sql, "$%v", argIdx+f+1) + } + fmt.Fprintf(&sql, ")") + + args[argIdx+0] = bid.ParentRoot + args[argIdx+1] = bid.ParentHash + args[argIdx+2] = bid.BlockHash + args[argIdx+3] = bid.FeeRecipient + args[argIdx+4] = bid.GasLimit + args[argIdx+5] = bid.BuilderIndex + args[argIdx+6] = bid.Slot + args[argIdx+7] = bid.Value + args[argIdx+8] = bid.ElPayment + argIdx += fieldCount + } + fmt.Fprint(&sql, EngineQuery(map[dbtypes.DBEngineType]string{ + dbtypes.DBEnginePgsql: " ON CONFLICT (parent_root, parent_hash, block_hash, builder_index) DO UPDATE SET " + + "fee_recipient = excluded.fee_recipient, " + + "gas_limit = excluded.gas_limit, " + + "slot = excluded.slot, " + + "value = excluded.value, " + + "el_payment = excluded.el_payment", + dbtypes.DBEngineSqlite: "", + })) + + _, err := tx.Exec(sql.String(), args...) 
+ if err != nil { + return err + } + return nil +} + +func GetBidsForBlockRoot(ctx context.Context, blockRoot []byte) []*dbtypes.BlockBid { + var sql strings.Builder + args := []any{ + blockRoot, + } + fmt.Fprint(&sql, ` + SELECT + parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment + FROM block_bids + WHERE parent_root = $1 + ORDER BY value DESC + `) + + bids := []*dbtypes.BlockBid{} + err := ReaderDb.SelectContext(ctx, &bids, sql.String(), args...) + if err != nil { + logger.Errorf("Error while fetching bids for block root: %v", err) + return nil + } + return bids +} + +func GetBidsForSlotRange(ctx context.Context, minSlot uint64) []*dbtypes.BlockBid { + var sql strings.Builder + args := []any{ + minSlot, + } + fmt.Fprint(&sql, ` + SELECT + parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment + FROM block_bids + WHERE slot >= $1 + ORDER BY slot DESC, value DESC + `) + + bids := []*dbtypes.BlockBid{} + err := ReaderDb.SelectContext(ctx, &bids, sql.String(), args...) 
+ if err != nil { + logger.Errorf("Error while fetching bids for slot range: %v", err) + return nil + } + return bids +} + +func DeleteBidsBeforeSlot(minSlot uint64, tx *sqlx.Tx) error { + _, err := tx.Exec(`DELETE FROM block_bids WHERE slot < $1`, minSlot) + return err +} + +// GetBidsByBlockHashes returns bids for multiple block hashes and a specific builder index +// Returns a map keyed by block hash (hex string) for easy lookup +func GetBidsByBlockHashes(ctx context.Context, blockHashes [][]byte, builderIndex uint64) map[string]*dbtypes.BlockBid { + result := make(map[string]*dbtypes.BlockBid, len(blockHashes)) + if len(blockHashes) == 0 { + return result + } + + var sql strings.Builder + args := make([]any, 0, len(blockHashes)+1) + + fmt.Fprint(&sql, ` + SELECT + parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment + FROM block_bids + WHERE builder_index = $1 AND block_hash IN (`) + + args = append(args, builderIndex) + for i, hash := range blockHashes { + if i > 0 { + fmt.Fprint(&sql, ", ") + } + fmt.Fprintf(&sql, "$%d", i+2) + args = append(args, hash) + } + fmt.Fprint(&sql, ")") + + bids := []*dbtypes.BlockBid{} + err := ReaderDb.SelectContext(ctx, &bids, sql.String(), args...) 
+ if err != nil { + logger.Errorf("Error while fetching bids by block hashes: %v", err) + return result + } + + for _, bid := range bids { + key := fmt.Sprintf("%x", bid.BlockHash) + result[key] = bid + } + + return result +} + +// GetBidsByBuilderIndex returns bids submitted by a specific builder, ordered by slot descending +func GetBidsByBuilderIndex(ctx context.Context, builderIndex uint64, offset uint64, limit uint32) ([]*dbtypes.BlockBid, uint64) { + var sql strings.Builder + args := []any{ + builderIndex, + } + fmt.Fprint(&sql, ` + SELECT + parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment + FROM block_bids + WHERE builder_index = $1 + ORDER BY slot DESC, value DESC + `) + + if limit > 0 { + fmt.Fprintf(&sql, " LIMIT $%d OFFSET $%d", len(args)+1, len(args)+2) + args = append(args, limit, offset) + } + + bids := []*dbtypes.BlockBid{} + err := ReaderDb.SelectContext(ctx, &bids, sql.String(), args...) + if err != nil { + logger.Errorf("Error while fetching bids for builder index %d: %v", builderIndex, err) + return nil, 0 + } + + // Get total count + var totalCount uint64 + err = ReaderDb.GetContext(ctx, &totalCount, `SELECT COUNT(*) FROM block_bids WHERE builder_index = $1`, builderIndex) + if err != nil { + logger.Errorf("Error while counting bids for builder index %d: %v", builderIndex, err) + return bids, 0 + } + + return bids, totalCount +} diff --git a/db/builders.go b/db/builders.go new file mode 100644 index 000000000..26d67bfa1 --- /dev/null +++ b/db/builders.go @@ -0,0 +1,450 @@ +package db + +import ( + "context" + "fmt" + "strings" + + "github.com/ethpandaops/dora/dbtypes" + "github.com/jmoiron/sqlx" +) + +// InsertBuilder inserts a single builder into the database +func InsertBuilder(builder *dbtypes.Builder, tx *sqlx.Tx) error { + _, err := tx.Exec(EngineQuery(map[dbtypes.DBEngineType]string{ + dbtypes.DBEnginePgsql: ` + INSERT INTO builders ( + pubkey, builder_index, version, execution_address, 
+ deposit_epoch, withdrawable_epoch, superseded + ) VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (pubkey) DO UPDATE SET + builder_index = excluded.builder_index, + version = excluded.version, + execution_address = excluded.execution_address, + deposit_epoch = excluded.deposit_epoch, + withdrawable_epoch = excluded.withdrawable_epoch, + superseded = excluded.superseded`, + dbtypes.DBEngineSqlite: ` + INSERT OR REPLACE INTO builders ( + pubkey, builder_index, version, execution_address, + deposit_epoch, withdrawable_epoch, superseded + ) VALUES ($1, $2, $3, $4, $5, $6, $7)`, + }), + builder.Pubkey, + builder.BuilderIndex, + builder.Version, + builder.ExecutionAddress, + builder.DepositEpoch, + builder.WithdrawableEpoch, + builder.Superseded) + + if err != nil { + return fmt.Errorf("error inserting builder: %w", err) + } + return nil +} + +// InsertBuilderBatch inserts multiple builders in a batch +func InsertBuilderBatch(builders []*dbtypes.Builder, tx *sqlx.Tx) error { + if len(builders) == 0 { + return nil + } + + valueStrings := make([]string, len(builders)) + valueArgs := make([]any, 0, len(builders)*7) + for i, b := range builders { + valueStrings[i] = fmt.Sprintf("($%v, $%v, $%v, $%v, $%v, $%v, $%v)", + i*7+1, i*7+2, i*7+3, i*7+4, i*7+5, i*7+6, i*7+7) + valueArgs = append(valueArgs, + b.Pubkey, + b.BuilderIndex, + b.Version, + b.ExecutionAddress, + b.DepositEpoch, + b.WithdrawableEpoch, + b.Superseded) + } + + stmt := fmt.Sprintf(EngineQuery(map[dbtypes.DBEngineType]string{ + dbtypes.DBEnginePgsql: ` + INSERT INTO builders ( + pubkey, builder_index, version, execution_address, + deposit_epoch, withdrawable_epoch, superseded + ) VALUES %s + ON CONFLICT (pubkey) DO UPDATE SET + builder_index = excluded.builder_index, + version = excluded.version, + execution_address = excluded.execution_address, + deposit_epoch = excluded.deposit_epoch, + withdrawable_epoch = excluded.withdrawable_epoch, + superseded = excluded.superseded`, + dbtypes.DBEngineSqlite: ` + 
INSERT OR REPLACE INTO builders ( + pubkey, builder_index, version, execution_address, + deposit_epoch, withdrawable_epoch, superseded + ) VALUES %s`, + }), strings.Join(valueStrings, ",")) + + _, err := tx.Exec(stmt, valueArgs...) + if err != nil { + return fmt.Errorf("error inserting builder batch: %w", err) + } + + return nil +} + +// GetBuilderByPubkey returns a builder by pubkey (primary key) +func GetBuilderByPubkey(ctx context.Context, pubkey []byte) *dbtypes.Builder { + builder := dbtypes.Builder{} + err := ReaderDb.GetContext(ctx, &builder, ` + SELECT * FROM builders WHERE pubkey = $1 + `, pubkey) + if err != nil { + return nil + } + return &builder +} + +// GetActiveBuilderByIndex returns the active (non-superseded) builder for a given index +func GetActiveBuilderByIndex(ctx context.Context, index uint64) *dbtypes.Builder { + builder := dbtypes.Builder{} + err := ReaderDb.GetContext(ctx, &builder, ` + SELECT * FROM builders WHERE builder_index = $1 AND superseded = false + `, index) + if err != nil { + return nil + } + return &builder +} + +// GetBuildersByIndex returns all builders (including superseded) for a given index +func GetBuildersByIndex(ctx context.Context, index uint64) []*dbtypes.Builder { + builders := []*dbtypes.Builder{} + err := ReaderDb.SelectContext(ctx, &builders, ` + SELECT * FROM builders WHERE builder_index = $1 ORDER BY superseded ASC + `, index) + if err != nil { + logger.Errorf("Error while fetching builders by index: %v", err) + return nil + } + return builders +} + +// GetBuilderRange returns builders in a given index range (only active builders) +func GetBuilderRange(ctx context.Context, startIndex uint64, endIndex uint64) []*dbtypes.Builder { + builders := []*dbtypes.Builder{} + err := ReaderDb.SelectContext(ctx, &builders, ` + SELECT * FROM builders + WHERE builder_index >= $1 AND builder_index <= $2 AND superseded = false + ORDER BY builder_index ASC + `, startIndex, endIndex) + if err != nil { + logger.Errorf("Error while 
fetching builder range: %v", err) + return nil + } + return builders +} + +// GetMaxBuilderIndex returns the highest builder index in the database +func GetMaxBuilderIndex(ctx context.Context) (uint64, error) { + var maxIndex uint64 + err := ReaderDb.GetContext(ctx, &maxIndex, "SELECT COALESCE(MAX(builder_index), 0) FROM builders") + if err != nil { + return 0, fmt.Errorf("error getting max builder index: %w", err) + } + return maxIndex, nil +} + +// GetBuilderCount returns the count of builders (optionally only active) +func GetBuilderCount(ctx context.Context, activeOnly bool) (uint64, error) { + var count uint64 + var err error + if activeOnly { + err = ReaderDb.GetContext(ctx, &count, "SELECT COUNT(*) FROM builders WHERE superseded = false") + } else { + err = ReaderDb.GetContext(ctx, &count, "SELECT COUNT(*) FROM builders") + } + if err != nil { + return 0, fmt.Errorf("error getting builder count: %w", err) + } + return count, nil +} + +// SetBuilderSuperseded marks a builder as superseded +func SetBuilderSuperseded(pubkey []byte, tx *sqlx.Tx) error { + _, err := tx.Exec(` + UPDATE builders SET superseded = true WHERE pubkey = $1 + `, pubkey) + if err != nil { + return fmt.Errorf("error setting builder superseded: %w", err) + } + return nil +} + +// SetBuildersSuperseded marks multiple builders as superseded in a batch +func SetBuildersSuperseded(pubkeys [][]byte, tx *sqlx.Tx) error { + if len(pubkeys) == 0 { + return nil + } + + var sql strings.Builder + sql.WriteString("UPDATE builders SET superseded = true WHERE pubkey IN (") + + args := make([]any, len(pubkeys)) + for i, pk := range pubkeys { + if i > 0 { + sql.WriteString(", ") + } + fmt.Fprintf(&sql, "$%d", i+1) + args[i] = pk + } + sql.WriteString(")") + + _, err := tx.Exec(sql.String(), args...) 
+ if err != nil { + return fmt.Errorf("error setting builders superseded: %w", err) + } + return nil +} + +// StreamBuildersByPubkeys streams builders by pubkeys in batches +func StreamBuildersByPubkeys(ctx context.Context, pubkeys [][]byte, cb func(builder *dbtypes.Builder) bool) error { + const batchSize = 1000 + + for i := 0; i < len(pubkeys); i += batchSize { + end := min(i+batchSize, len(pubkeys)) + batch := pubkeys[i:end] + + var sql strings.Builder + fmt.Fprintf(&sql, ` + SELECT + pubkey, builder_index, version, execution_address, + deposit_epoch, withdrawable_epoch, superseded + FROM builders + WHERE pubkey in (`) + + args := make([]any, len(batch)) + for j, pk := range batch { + if j > 0 { + fmt.Fprintf(&sql, ", ") + } + fmt.Fprintf(&sql, "$%v", j+1) + args[j] = pk + } + fmt.Fprintf(&sql, ")") + + // Create pubkey map for ordering + pubkeyMap := make(map[string]int, len(batch)) + for pos, pk := range batch { + pubkeyMap[string(pk)] = pos + } + + // Fetch all builders for this batch + builders := make([]*dbtypes.Builder, len(batch)) + rows, err := ReaderDb.QueryContext(ctx, sql.String(), args...) 
+ if err != nil { + return fmt.Errorf("error querying builders: %w", err) + } + defer rows.Close() + + for rows.Next() { + builder := &dbtypes.Builder{} + err := rows.Scan( + &builder.Pubkey, + &builder.BuilderIndex, + &builder.Version, + &builder.ExecutionAddress, + &builder.DepositEpoch, + &builder.WithdrawableEpoch, + &builder.Superseded, + ) + if err != nil { + return fmt.Errorf("error scanning builder: %w", err) + } + pos := pubkeyMap[string(builder.Pubkey)] + builders[pos] = builder + } + + if err = rows.Err(); err != nil { + return fmt.Errorf("error iterating rows: %w", err) + } + + // Stream in original order + for _, b := range builders { + if b != nil && !cb(b) { + return nil + } + } + } + + return nil +} + +// GetBuildersByExecutionAddress returns builders with a specific execution address +func GetBuildersByExecutionAddress(ctx context.Context, address []byte) []*dbtypes.Builder { + builders := []*dbtypes.Builder{} + err := ReaderDb.SelectContext(ctx, &builders, ` + SELECT * FROM builders WHERE execution_address = $1 ORDER BY builder_index ASC + `, address) + if err != nil { + logger.Errorf("Error while fetching builders by execution address: %v", err) + return nil + } + return builders +} + +// GetBuilderIndexesByFilter returns builder indexes matching a filter +func GetBuilderIndexesByFilter(ctx context.Context, filter dbtypes.BuilderFilter, currentEpoch uint64) ([]uint64, error) { + var sql strings.Builder + args := []interface{}{} + fmt.Fprint(&sql, ` + SELECT + builder_index + FROM builders + `) + + args = buildBuilderFilterSql(filter, currentEpoch, &sql, args) + + switch filter.OrderBy { + case dbtypes.BuilderOrderIndexAsc: + fmt.Fprint(&sql, " ORDER BY builder_index ASC") + case dbtypes.BuilderOrderIndexDesc: + fmt.Fprint(&sql, " ORDER BY builder_index DESC") + case dbtypes.BuilderOrderPubKeyAsc: + fmt.Fprint(&sql, " ORDER BY pubkey ASC") + case dbtypes.BuilderOrderPubKeyDesc: + fmt.Fprint(&sql, " ORDER BY pubkey DESC") + case 
dbtypes.BuilderOrderDepositEpochAsc: + fmt.Fprint(&sql, " ORDER BY deposit_epoch ASC") + case dbtypes.BuilderOrderDepositEpochDesc: + fmt.Fprint(&sql, " ORDER BY deposit_epoch DESC") + case dbtypes.BuilderOrderWithdrawableEpochAsc: + fmt.Fprint(&sql, " ORDER BY withdrawable_epoch ASC") + case dbtypes.BuilderOrderWithdrawableEpochDesc: + fmt.Fprint(&sql, " ORDER BY withdrawable_epoch DESC") + } + + builderIds := []uint64{} + err := ReaderDb.SelectContext(ctx, &builderIds, sql.String(), args...) + if err != nil { + logger.Errorf("Error while fetching builders by filter: %v", err) + return nil, err + } + + return builderIds, nil +} + +func buildBuilderFilterSql(filter dbtypes.BuilderFilter, currentEpoch uint64, sql *strings.Builder, args []interface{}) []interface{} { + filterOp := "WHERE" + + if filter.MinIndex != nil { + fmt.Fprintf(sql, " %v builder_index >= $%v", filterOp, len(args)+1) + args = append(args, *filter.MinIndex) + filterOp = "AND" + } + if filter.MaxIndex != nil { + fmt.Fprintf(sql, " %v builder_index <= $%v", filterOp, len(args)+1) + args = append(args, *filter.MaxIndex) + filterOp = "AND" + } + if len(filter.PubKey) > 0 { + fmt.Fprintf(sql, " %v pubkey LIKE $%v", filterOp, len(args)+1) + args = append(args, append(filter.PubKey, '%')) + filterOp = "AND" + } + if len(filter.ExecutionAddress) > 0 { + fmt.Fprintf(sql, " %v execution_address = $%v", filterOp, len(args)+1) + args = append(args, filter.ExecutionAddress) + filterOp = "AND" + } + if len(filter.Status) > 0 { + statusConditions := make([]string, 0, len(filter.Status)) + for _, status := range filter.Status { + switch status { + case dbtypes.BuilderStatusActiveFilter: + statusConditions = append(statusConditions, fmt.Sprintf("(superseded = false AND withdrawable_epoch > $%v)", len(args)+1)) + args = append(args, ConvertUint64ToInt64(currentEpoch)) + case dbtypes.BuilderStatusExitedFilter: + statusConditions = append(statusConditions, fmt.Sprintf("(superseded = false AND withdrawable_epoch <= 
$%v)", len(args)+1)) + args = append(args, ConvertUint64ToInt64(currentEpoch)) + case dbtypes.BuilderStatusSupersededFilter: + statusConditions = append(statusConditions, "superseded = true") + } + } + if len(statusConditions) > 0 { + fmt.Fprintf(sql, " %v (%v)", filterOp, strings.Join(statusConditions, " OR ")) + } + } + + return args +} + +// StreamBuildersByIndexes streams builders by indexes +func StreamBuildersByIndexes(ctx context.Context, indexes []uint64, cb func(builder *dbtypes.Builder) bool) { + const batchSize = 1000 + + for i := 0; i < len(indexes); i += batchSize { + end := min(i+batchSize, len(indexes)) + batch := indexes[i:end] + + var sql strings.Builder + fmt.Fprint(&sql, ` + SELECT + pubkey, builder_index, version, execution_address, + deposit_epoch, withdrawable_epoch, superseded + FROM builders + WHERE builder_index IN (`) + + args := make([]any, len(batch)) + for j, idx := range batch { + if j > 0 { + fmt.Fprint(&sql, ", ") + } + fmt.Fprintf(&sql, "$%v", j+1) + args[j] = idx + } + fmt.Fprint(&sql, ")") + + // Create index map for ordering + indexMap := make(map[uint64]int, len(batch)) + for pos, idx := range batch { + indexMap[idx] = pos + } + + // Fetch all builders for this batch + builders := make([]*dbtypes.Builder, len(batch)) + rows, err := ReaderDb.QueryContext(ctx, sql.String(), args...) 
+ if err != nil { + logger.Errorf("Error querying builders: %v", err) + return + } + + for rows.Next() { + builder := &dbtypes.Builder{} + err := rows.Scan( + &builder.Pubkey, + &builder.BuilderIndex, + &builder.Version, + &builder.ExecutionAddress, + &builder.DepositEpoch, + &builder.WithdrawableEpoch, + &builder.Superseded, + ) + if err != nil { + logger.Errorf("Error scanning builder: %v", err) + rows.Close() + return + } + pos := indexMap[builder.BuilderIndex] + builders[pos] = builder + } + rows.Close() + + // Stream in original order + for _, b := range builders { + if b != nil && !cb(b) { + return + } + } + } +} diff --git a/db/deposits.go b/db/deposits.go index c54a2c3a6..9adc88f1d 100644 --- a/db/deposits.go +++ b/db/deposits.go @@ -139,14 +139,18 @@ func GetDepositsFiltered(ctx context.Context, offset uint64, limit uint32, canon } if len(txFilter.WithdrawalAddress) > 0 { + // 0x01 = ETH1, 0x02 = compounding, 0x03 = builder deposit wdcreds1 := make([]byte, 32) wdcreds1[0] = 0x01 copy(wdcreds1[12:], txFilter.WithdrawalAddress) wdcreds2 := make([]byte, 32) wdcreds2[0] = 0x02 copy(wdcreds2[12:], txFilter.WithdrawalAddress) - args = append(args, wdcreds1, wdcreds2) - fmt.Fprintf(&sql, " %v (deposits.withdrawalcredentials = $%v OR deposits.withdrawalcredentials = $%v)", filterOp, len(args)-1, len(args)) + wdcreds3 := make([]byte, 32) + wdcreds3[0] = 0x03 + copy(wdcreds3[12:], txFilter.WithdrawalAddress) + args = append(args, wdcreds1, wdcreds2, wdcreds3) + fmt.Fprintf(&sql, " %v (deposits.withdrawalcredentials = $%v OR deposits.withdrawalcredentials = $%v OR deposits.withdrawalcredentials = $%v)", filterOp, len(args)-2, len(args)-1, len(args)) filterOp = "AND" } diff --git a/db/epochs.go b/db/epochs.go index aafce0f59..b003009f6 100644 --- a/db/epochs.go +++ b/db/epochs.go @@ -14,8 +14,8 @@ func InsertEpoch(ctx context.Context, tx *sqlx.Tx, epoch *dbtypes.Epoch) error { epoch, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, 
block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, blob_count, - eth_gas_used, eth_gas_limit - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22) + eth_gas_used, eth_gas_limit, payload_count + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23) ON CONFLICT (epoch) DO UPDATE SET validator_count = excluded.validator_count, validator_balance = excluded.validator_balance, @@ -37,18 +37,19 @@ func InsertEpoch(ctx context.Context, tx *sqlx.Tx, epoch *dbtypes.Epoch) error { sync_participation = excluded.sync_participation, blob_count = excluded.blob_count, eth_gas_used = excluded.eth_gas_used, - eth_gas_limit = excluded.eth_gas_limit`, + eth_gas_limit = excluded.eth_gas_limit, + payload_count = excluded.payload_count`, dbtypes.DBEngineSqlite: ` INSERT OR REPLACE INTO epochs ( epoch, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, blob_count, - eth_gas_used, eth_gas_limit - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22)`, + eth_gas_used, eth_gas_limit, payload_count + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23)`, }), epoch.Epoch, epoch.ValidatorCount, epoch.ValidatorBalance, epoch.Eligible, epoch.VotedTarget, epoch.VotedHead, epoch.VotedTotal, epoch.BlockCount, epoch.OrphanedCount, epoch.AttestationCount, epoch.DepositCount, epoch.ExitCount, epoch.WithdrawCount, epoch.WithdrawAmount, 
epoch.AttesterSlashingCount, epoch.ProposerSlashingCount, - epoch.BLSChangeCount, epoch.EthTransactionCount, epoch.SyncParticipation, epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit) + epoch.BLSChangeCount, epoch.EthTransactionCount, epoch.SyncParticipation, epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit, epoch.PayloadCount) if err != nil { return err } @@ -71,7 +72,7 @@ func GetEpochs(ctx context.Context, firstEpoch uint64, limit uint32) []*dbtypes. epoch, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, blob_count, - eth_gas_used, eth_gas_limit + eth_gas_used, eth_gas_limit, payload_count FROM epochs WHERE epoch <= $1 ORDER BY epoch DESC diff --git a/db/orphaned_blocks.go b/db/orphaned_blocks.go index a027b860a..ae93d4e86 100644 --- a/db/orphaned_blocks.go +++ b/db/orphaned_blocks.go @@ -11,15 +11,15 @@ func InsertOrphanedBlock(ctx context.Context, tx *sqlx.Tx, block *dbtypes.Orphan _, err := tx.ExecContext(ctx, EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` INSERT INTO orphaned_blocks ( - root, header_ver, header_ssz, block_ver, block_ssz, block_uid - ) VALUES ($1, $2, $3, $4, $5, $6) + root, header_ver, header_ssz, block_ver, block_ssz, block_uid, payload_ver, payload_ssz + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) ON CONFLICT (root) DO NOTHING`, dbtypes.DBEngineSqlite: ` INSERT OR IGNORE INTO orphaned_blocks ( - root, header_ver, header_ssz, block_ver, block_ssz, block_uid - ) VALUES ($1, $2, $3, $4, $5, $6)`, + root, header_ver, header_ssz, block_ver, block_ssz, block_uid, payload_ver, payload_ssz + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`, }), - block.Root, block.HeaderVer, 
block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.BlockUid, block.PayloadVer, block.PayloadSSZ) if err != nil { return err } @@ -29,7 +29,7 @@ func GetOrphanedBlock(ctx context.Context, root []byte) *dbtypes.OrphanedBlock { block := dbtypes.OrphanedBlock{} err := ReaderDb.GetContext(ctx, &block, ` - SELECT root, header_ver, header_ssz, block_ver, block_ssz, block_uid + SELECT root, header_ver, header_ssz, block_ver, block_ssz, block_uid, payload_ver, payload_ssz FROM orphaned_blocks WHERE root = $1 `, root) diff --git a/db/schema/pgsql/20260108202212_epbs-payload.sql b/db/schema/pgsql/20260108202212_epbs-payload.sql new file mode 100644 index 000000000..fdec29e44 --- /dev/null +++ b/db/schema/pgsql/20260108202212_epbs-payload.sql @@ -0,0 +1,83 @@ +-- +goose Up +-- +goose StatementBegin + +ALTER TABLE public."unfinalized_blocks" ADD + "payload_ver" int NOT NULL DEFAULT 0, + ADD "payload_ssz" bytea NULL; + +ALTER TABLE public."orphaned_blocks" ADD + "payload_ver" int NOT NULL DEFAULT 0, + ADD "payload_ssz" bytea NULL; + +ALTER TABLE public."slots" ADD + "payload_status" smallint NOT NULL DEFAULT 0, + ADD "builder_index" bigint NOT NULL DEFAULT -1, + ADD "eth_block_parent_hash" bytea NULL; + +CREATE INDEX IF NOT EXISTS "slots_payload_status_idx" + ON public."slots" + ("payload_status" ASC NULLS LAST); + +CREATE INDEX IF NOT EXISTS "slots_eth_block_parent_hash_idx" + ON public."slots" + ("eth_block_parent_hash" ASC NULLS LAST); + +CREATE INDEX IF NOT EXISTS "slots_builder_index_idx" + ON public."slots" + ("builder_index" ASC NULLS LAST); + +ALTER TABLE public."epochs" ADD + "payload_count" int NOT NULL DEFAULT 0; + +ALTER TABLE public."unfinalized_epochs" ADD + "payload_count" int NOT NULL DEFAULT 0; + +CREATE TABLE IF NOT EXISTS public."block_bids" ( + "parent_root" bytea NOT NULL, + "parent_hash" bytea NOT NULL, + "block_hash" bytea NOT NULL, + "fee_recipient" bytea NOT NULL, + "gas_limit" bigint 
NOT NULL, + "builder_index" bigint NOT NULL, + "slot" bigint NOT NULL, + "value" bigint NOT NULL, + "el_payment" bigint NOT NULL, + CONSTRAINT block_bids_pkey PRIMARY KEY (parent_root, parent_hash, block_hash, builder_index) +); + +CREATE INDEX IF NOT EXISTS "block_bids_parent_root_idx" + ON public."block_bids" + ("parent_root" ASC NULLS LAST); + +CREATE INDEX IF NOT EXISTS "block_bids_builder_index_idx" + ON public."block_bids" + ("builder_index" ASC NULLS LAST); + +CREATE INDEX IF NOT EXISTS "block_bids_slot_idx" + ON public."block_bids" + ("slot" ASC NULLS LAST); + +CREATE TABLE IF NOT EXISTS public."builders" ( + "pubkey" bytea NOT NULL, + "builder_index" bigint NOT NULL, + "version" smallint NOT NULL, + "execution_address" bytea NOT NULL, + "deposit_epoch" bigint NOT NULL, + "withdrawable_epoch" bigint NOT NULL, + "superseded" boolean NOT NULL DEFAULT false, + CONSTRAINT builders_pkey PRIMARY KEY (pubkey) +); + +CREATE INDEX IF NOT EXISTS "builders_builder_index_idx" + ON public."builders" + ("builder_index" ASC NULLS LAST); + +CREATE INDEX IF NOT EXISTS "builders_execution_address_idx" + ON public."builders" + ("execution_address" ASC NULLS LAST); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +SELECT 'NOT SUPPORTED'; +-- +goose StatementEnd \ No newline at end of file diff --git a/db/schema/sqlite/20260108202212_epbs-payload.sql b/db/schema/sqlite/20260108202212_epbs-payload.sql new file mode 100644 index 000000000..2bf22624a --- /dev/null +++ b/db/schema/sqlite/20260108202212_epbs-payload.sql @@ -0,0 +1,60 @@ +-- +goose Up +-- +goose StatementBegin + +ALTER TABLE "unfinalized_blocks" ADD "payload_ver" int NOT NULL DEFAULT 0; +ALTER TABLE "unfinalized_blocks" ADD "payload_ssz" BLOB NULL; + +ALTER TABLE "orphaned_blocks" ADD "payload_ver" int NOT NULL DEFAULT 0; +ALTER TABLE "orphaned_blocks" ADD "payload_ssz" BLOB NULL; + +ALTER TABLE "slots" ADD "payload_status" smallint NOT NULL DEFAULT 0; +ALTER TABLE "slots" ADD "builder_index" 
BIGINT NOT NULL DEFAULT -1; +ALTER TABLE "slots" ADD "eth_block_parent_hash" BLOB NULL; + +CREATE INDEX IF NOT EXISTS "slots_payload_status_idx" ON "slots" ("payload_status" ASC); +CREATE INDEX IF NOT EXISTS "slots_eth_block_parent_hash_idx" ON "slots" ("eth_block_parent_hash" ASC); +CREATE INDEX IF NOT EXISTS "slots_builder_index_idx" ON "slots" ("builder_index" ASC); + +ALTER TABLE "epochs" ADD "payload_count" int NOT NULL DEFAULT 0; + +ALTER TABLE "unfinalized_epochs" ADD "payload_count" int NOT NULL DEFAULT 0; + +CREATE TABLE IF NOT EXISTS "block_bids" ( + "parent_root" BLOB NOT NULL, + "parent_hash" BLOB NOT NULL, + "block_hash" BLOB NOT NULL, + "fee_recipient" BLOB NOT NULL, + "gas_limit" BIGINT NOT NULL, + "builder_index" BIGINT NOT NULL, + "slot" BIGINT NOT NULL, + "value" BIGINT NOT NULL, + "el_payment" BIGINT NOT NULL, + CONSTRAINT block_bids_pkey PRIMARY KEY (parent_root, parent_hash, block_hash, builder_index) +); + +CREATE INDEX IF NOT EXISTS "block_bids_parent_root_idx" ON "block_bids" ("parent_root" ASC); + +CREATE INDEX IF NOT EXISTS "block_bids_builder_index_idx" ON "block_bids" ("builder_index" ASC); + +CREATE INDEX IF NOT EXISTS "block_bids_slot_idx" ON "block_bids" ("slot" ASC); + +CREATE TABLE IF NOT EXISTS "builders" ( + "pubkey" BLOB NOT NULL, + "builder_index" BIGINT NOT NULL, + "version" SMALLINT NOT NULL, + "execution_address" BLOB NOT NULL, + "deposit_epoch" BIGINT NOT NULL, + "withdrawable_epoch" BIGINT NOT NULL, + "superseded" BOOLEAN NOT NULL DEFAULT false, + PRIMARY KEY (pubkey) +); + +CREATE INDEX IF NOT EXISTS "builders_builder_index_idx" ON "builders" ("builder_index" ASC); + +CREATE INDEX IF NOT EXISTS "builders_execution_address_idx" ON "builders" ("execution_address" ASC); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +SELECT 'NOT SUPPORTED'; +-- +goose StatementEnd \ No newline at end of file diff --git a/db/slots.go b/db/slots.go index d9d689bdd..33977da9d 100644 --- a/db/slots.go +++ b/db/slots.go @@ 
-20,31 +20,32 @@ func InsertSlot(ctx context.Context, tx *sqlx.Tx, slot *dbtypes.Slot) error { slot, proposer, status, root, parent_root, state_root, graffiti, graffiti_text, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash, - eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used, - eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34) + eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, + eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, + max_exec_time, exec_times, block_uid, payload_status, builder_index + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34, $35, $36, $37) ON CONFLICT (slot, root) DO UPDATE SET status = excluded.status, eth_block_extra = excluded.eth_block_extra, eth_block_extra_text = excluded.eth_block_extra_text, - fork_id = excluded.fork_id`, + fork_id = excluded.fork_id, + payload_status = excluded.payload_status`, dbtypes.DBEngineSqlite: ` INSERT OR REPLACE INTO slots ( slot, proposer, status, root, parent_root, state_root, graffiti, graffiti_text, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash, - eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used, - eth_gas_limit, eth_base_fee, eth_fee_recipient, 
block_size, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34)`, + eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, + eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, + max_exec_time, exec_times, block_uid, payload_status, builder_index + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34, $35, $36, $37)`, }), slot.Slot, slot.Proposer, slot.Status, slot.Root, slot.ParentRoot, slot.StateRoot, slot.Graffiti, slot.GraffitiText, slot.AttestationCount, slot.DepositCount, slot.ExitCount, slot.WithdrawCount, slot.WithdrawAmount, slot.AttesterSlashingCount, slot.ProposerSlashingCount, slot.BLSChangeCount, slot.EthTransactionCount, slot.EthBlockNumber, slot.EthBlockHash, - slot.EthBlockExtra, slot.EthBlockExtraText, slot.SyncParticipation, slot.ForkId, slot.BlobCount, slot.EthGasUsed, - slot.EthGasLimit, slot.EthBaseFee, slot.EthFeeRecipient, slot.BlockSize, slot.RecvDelay, slot.MinExecTime, slot.MaxExecTime, - slot.ExecTimes, slot.BlockUid) + slot.EthBlockParentHash, slot.EthBlockExtra, slot.EthBlockExtraText, slot.SyncParticipation, slot.ForkId, slot.BlobCount, + slot.EthGasUsed, slot.EthGasLimit, slot.EthBaseFee, slot.EthFeeRecipient, slot.BlockSize, slot.RecvDelay, slot.MinExecTime, + slot.MaxExecTime, slot.ExecTimes, slot.BlockUid, slot.PayloadStatus, slot.BuilderIndex) if err != nil { return err } @@ -99,9 +100,9 @@ func GetSlotsRange(ctx context.Context, firstSlot uint64, lastSlot uint64, withM "state_root", "root", "slot", "proposer", "status", "parent_root", "graffiti", "graffiti_text", "attestation_count", "deposit_count", "exit_count", "withdraw_count", 
"withdraw_amount", "attester_slashing_count", "proposer_slashing_count", "bls_change_count", "eth_transaction_count", "eth_block_number", "eth_block_hash", - "eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count", "eth_gas_used", - "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", "max_exec_time", "exec_times", - "block_uid", + "eth_block_parent_hash", "eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count", + "eth_gas_used", "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", + "max_exec_time", "exec_times", "block_uid", "payload_status", "builder_index", } for _, blockField := range blockFields { fmt.Fprintf(&sql, ", slots.%v AS \"block.%v\"", blockField, blockField) @@ -133,9 +134,9 @@ func GetSlotsByParentRoot(ctx context.Context, parentRoot []byte) []*dbtypes.Slo slot, proposer, status, root, parent_root, state_root, graffiti, graffiti_text, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash, - eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used, - eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid + eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, + eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, + max_exec_time, exec_times, block_uid, payload_status, builder_index FROM slots WHERE parent_root = $1 ORDER BY slot DESC @@ -154,9 +155,9 @@ func GetSlotByRoot(ctx context.Context, root []byte) *dbtypes.Slot { root, slot, parent_root, state_root, status, proposer, graffiti, graffiti_text, attestation_count, deposit_count, exit_count, 
withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash, - eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used, - eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid + eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, + eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, + max_exec_time, exec_times, block_uid, payload_status, builder_index FROM slots WHERE root = $1 `, root) @@ -182,9 +183,9 @@ func GetSlotsByRoots(ctx context.Context, roots [][]byte) map[phase0.Root]*dbtyp root, slot, parent_root, state_root, status, proposer, graffiti, graffiti_text, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash, - eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used, - eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid + eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, + eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, + max_exec_time, exec_times, block_uid, payload_status, builder_index FROM slots WHERE root IN (%v) ORDER BY slot DESC`, @@ -258,9 +259,9 @@ func GetSlotsByBlockHash(ctx context.Context, blockHash []byte) []*dbtypes.Slot slot, proposer, status, root, parent_root, state_root, graffiti, graffiti_text, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, 
eth_block_number, eth_block_hash, - eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used, - eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid + eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, + eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, + max_exec_time, exec_times, block_uid, payload_status, builder_index FROM slots WHERE eth_block_hash = $1 ORDER BY slot DESC @@ -320,9 +321,9 @@ func GetFilteredSlots(ctx context.Context, filter *dbtypes.BlockFilter, firstSlo "state_root", "root", "slot", "proposer", "status", "parent_root", "graffiti", "graffiti_text", "attestation_count", "deposit_count", "exit_count", "withdraw_count", "withdraw_amount", "attester_slashing_count", "proposer_slashing_count", "bls_change_count", "eth_transaction_count", "eth_block_number", "eth_block_hash", - "eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count", "eth_gas_used", - "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", "max_exec_time", "exec_times", - "block_uid", + "eth_block_parent_hash", "eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count", + "eth_gas_used", "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", + "max_exec_time", "exec_times", "block_uid", "payload_status", "builder_index", } for _, blockField := range blockFields { fmt.Fprintf(&sql, ", slots.%v AS \"block.%v\"", blockField, blockField) @@ -477,6 +478,21 @@ func GetFilteredSlots(ctx context.Context, filter *dbtypes.BlockFilter, firstSlo fmt.Fprintf(&sql, ` AND slots.eth_block_hash = $%v `, argIdx) args = append(args, filter.EthBlockHash) } + if filter.BuilderIndex != nil { + argIdx++ + fmt.Fprintf(&sql, ` AND slots.builder_index = 
$%v `, argIdx) + args = append(args, *filter.BuilderIndex) + } + if filter.WithPayloadOrphaned == 0 { + fmt.Fprintf(&sql, ` AND slots.payload_status != 2 `) + } else if filter.WithPayloadOrphaned == 2 { + fmt.Fprintf(&sql, ` AND slots.payload_status = 2 `) + } + if len(filter.EthBlockParentHash) > 0 { + argIdx++ + fmt.Fprintf(&sql, ` AND slots.eth_block_parent_hash = $%v `, argIdx) + args = append(args, filter.EthBlockParentHash) + } if filter.MinGasUsed != nil { argIdx++ fmt.Fprintf(&sql, ` AND slots.eth_gas_used >= $%v `, argIdx) diff --git a/db/unfinalized_blocks.go b/db/unfinalized_blocks.go index 914173fb5..fbb47253e 100644 --- a/db/unfinalized_blocks.go +++ b/db/unfinalized_blocks.go @@ -13,18 +13,16 @@ func InsertUnfinalizedBlock(ctx context.Context, tx *sqlx.Tx, block *dbtypes.Unf _, err := tx.ExecContext(ctx, EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` INSERT INTO unfinalized_blocks ( - root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) + root, slot, header_ver, header_ssz, block_ver, block_ssz, payload_ver, payload_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) ON CONFLICT (root) DO NOTHING`, dbtypes.DBEngineSqlite: ` INSERT OR IGNORE INTO unfinalized_blocks ( - root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, - block_uid - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)`, + root, slot, header_ver, header_ssz, block_ver, block_ssz, payload_ver, payload_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)`, }), - block.Root, block.Slot, block.HeaderVer, 
block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.Status, block.ForkId, block.RecvDelay, block.MinExecTime, block.MaxExecTime, - block.ExecTimes, block.BlockUid, + block.Root, block.Slot, block.HeaderVer, block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.PayloadVer, block.PayloadSSZ, block.Status, block.ForkId, block.RecvDelay, + block.MinExecTime, block.MaxExecTime, block.ExecTimes, block.BlockUid, ) if err != nil { return err @@ -90,6 +88,14 @@ func UpdateUnfinalizedBlockForkId(ctx context.Context, tx *sqlx.Tx, roots [][]by return nil } +func UpdateUnfinalizedBlockPayload(ctx context.Context, tx *sqlx.Tx, root []byte, payloadVer uint64, payloadSSZ []byte) error { + _, err := tx.ExecContext(ctx, `UPDATE unfinalized_blocks SET payload_ver = $1, payload_ssz = $2 WHERE root = $3`, payloadVer, payloadSSZ, root) + if err != nil { + return err + } + return nil +} + func UpdateUnfinalizedBlockExecutionTimes(ctx context.Context, tx *sqlx.Tx, root []byte, minExecTime uint32, maxExecTime uint32, execTimes []byte) error { _, err := tx.ExecContext(ctx, `UPDATE unfinalized_blocks SET min_exec_time = $1, max_exec_time = $2, exec_times = $3 WHERE root = $4`, minExecTime, maxExecTime, execTimes, root) if err != nil { @@ -141,7 +147,7 @@ func StreamUnfinalizedBlocks(ctx context.Context, slot uint64, cb func(block *db var sql strings.Builder args := []any{slot} - fmt.Fprint(&sql, `SELECT root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid FROM unfinalized_blocks WHERE slot >= $1`) + fmt.Fprint(&sql, `SELECT root, slot, header_ver, header_ssz, block_ver, block_ssz, payload_ver, payload_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid FROM unfinalized_blocks WHERE slot >= $1`) rows, err := ReaderDb.QueryContext(ctx, sql.String(), args...) 
if err != nil { @@ -152,7 +158,7 @@ func StreamUnfinalizedBlocks(ctx context.Context, slot uint64, cb func(block *db for rows.Next() { block := dbtypes.UnfinalizedBlock{} err := rows.Scan( - &block.Root, &block.Slot, &block.HeaderVer, &block.HeaderSSZ, &block.BlockVer, &block.BlockSSZ, &block.Status, &block.ForkId, &block.RecvDelay, + &block.Root, &block.Slot, &block.HeaderVer, &block.HeaderSSZ, &block.BlockVer, &block.BlockSSZ, &block.PayloadVer, &block.PayloadSSZ, &block.Status, &block.ForkId, &block.RecvDelay, &block.MinExecTime, &block.MaxExecTime, &block.ExecTimes, &block.BlockUid, ) if err != nil { @@ -165,13 +171,28 @@ return nil } -func GetUnfinalizedBlock(ctx context.Context, root []byte) *dbtypes.UnfinalizedBlock { +func GetUnfinalizedBlock(ctx context.Context, root []byte, withHeader bool, withBody bool, withPayload bool) *dbtypes.UnfinalizedBlock { + var sql strings.Builder + fmt.Fprint(&sql, `SELECT root, slot`) + + if withHeader { + fmt.Fprint(&sql, `, header_ver, header_ssz`) + } + + if withBody { + fmt.Fprint(&sql, `, block_ver, block_ssz`) + } + + if withPayload { + fmt.Fprint(&sql, `, payload_ver, payload_ssz`) + } + + fmt.Fprint(&sql, `, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid`) + + fmt.Fprint(&sql, ` FROM unfinalized_blocks WHERE root = $1`) + block := dbtypes.UnfinalizedBlock{} - err := ReaderDb.GetContext(ctx, &block, ` - SELECT root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid - FROM unfinalized_blocks - WHERE root = $1 - `, root) + err := ReaderDb.GetContext(ctx, &block, sql.String(), root) if err != nil { logger.Errorf("Error while fetching unfinalized block 0x%x: %v", root, err) return nil diff --git a/db/unfinalized_epochs.go b/db/unfinalized_epochs.go index 960fde0d4..c5a452591 100644 --- a/db/unfinalized_epochs.go +++ 
b/db/unfinalized_epochs.go @@ -14,8 +14,8 @@ func InsertUnfinalizedEpoch(ctx context.Context, tx *sqlx.Tx, epoch *dbtypes.Unf epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, - blob_count, eth_gas_used, eth_gas_limit - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25) + blob_count, eth_gas_used, eth_gas_limit, payload_count + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26) ON CONFLICT (epoch, dependent_root, epoch_head_root) DO UPDATE SET epoch_head_fork_id = excluded.epoch_head_fork_id, validator_count = excluded.validator_count, @@ -38,19 +38,20 @@ func InsertUnfinalizedEpoch(ctx context.Context, tx *sqlx.Tx, epoch *dbtypes.Unf sync_participation = excluded.sync_participation, blob_count = excluded.blob_count, eth_gas_used = excluded.eth_gas_used, - eth_gas_limit = excluded.eth_gas_limit`, + eth_gas_limit = excluded.eth_gas_limit, + payload_count = excluded.payload_count`, dbtypes.DBEngineSqlite: ` INSERT OR REPLACE INTO unfinalized_epochs ( epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, - blob_count, eth_gas_used, eth_gas_limit - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25)`, + blob_count, eth_gas_used, 
eth_gas_limit, payload_count + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26)`, }), epoch.Epoch, epoch.DependentRoot, epoch.EpochHeadRoot, epoch.EpochHeadForkId, epoch.ValidatorCount, epoch.ValidatorBalance, epoch.Eligible, epoch.VotedTarget, epoch.VotedHead, epoch.VotedTotal, epoch.BlockCount, epoch.OrphanedCount, epoch.AttestationCount, epoch.DepositCount, epoch.ExitCount, epoch.WithdrawCount, epoch.WithdrawAmount, epoch.AttesterSlashingCount, epoch.ProposerSlashingCount, epoch.BLSChangeCount, epoch.EthTransactionCount, epoch.SyncParticipation, - epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit, + epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit, epoch.PayloadCount, ) if err != nil { return err @@ -64,7 +65,7 @@ func StreamUnfinalizedEpochs(ctx context.Context, epoch uint64, cb func(duty *db epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, - blob_count, eth_gas_used, eth_gas_limit + blob_count, eth_gas_used, eth_gas_limit, payload_count FROM unfinalized_epochs WHERE epoch >= $1`, epoch) if err != nil { @@ -78,7 +79,7 @@ func StreamUnfinalizedEpochs(ctx context.Context, epoch uint64, cb func(duty *db &e.Epoch, &e.DependentRoot, &e.EpochHeadRoot, &e.EpochHeadForkId, &e.ValidatorCount, &e.ValidatorBalance, &e.Eligible, &e.VotedTarget, &e.VotedHead, &e.VotedTotal, &e.BlockCount, &e.OrphanedCount, &e.AttestationCount, &e.DepositCount, &e.ExitCount, &e.WithdrawCount, &e.WithdrawAmount, &e.AttesterSlashingCount, &e.ProposerSlashingCount, &e.BLSChangeCount, &e.EthTransactionCount, &e.SyncParticipation, - &e.BlobCount, &e.EthGasUsed, &e.EthGasLimit, + &e.BlobCount, 
&e.EthGasUsed, &e.EthGasLimit, &e.PayloadCount, ) if err != nil { logger.Errorf("Error while scanning unfinalized epoch: %v", err) @@ -97,7 +98,7 @@ func GetUnfinalizedEpoch(ctx context.Context, epoch uint64, headRoot []byte) *db epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, - blob_count, eth_gas_used, eth_gas_limit + blob_count, eth_gas_used, eth_gas_limit, payload_count FROM unfinalized_epochs WHERE epoch = $1 AND epoch_head_root = $2 `, epoch, headRoot) diff --git a/dbtypes/dbtypes.go b/dbtypes/dbtypes.go index 7c90a09f6..dd0ee58ce 100644 --- a/dbtypes/dbtypes.go +++ b/dbtypes/dbtypes.go @@ -18,6 +18,14 @@ const ( Orphaned ) +type PayloadStatus uint8 + +const ( + PayloadStatusMissing PayloadStatus = iota + PayloadStatusCanonical + PayloadStatusOrphaned +) + type SlotHeader struct { Slot uint64 `db:"slot"` Proposer uint64 `db:"proposer"` @@ -25,40 +33,43 @@ type SlotHeader struct { } type Slot struct { - Slot uint64 `db:"slot"` - Proposer uint64 `db:"proposer"` - Status SlotStatus `db:"status"` - Root []byte `db:"root"` - ParentRoot []byte `db:"parent_root"` - StateRoot []byte `db:"state_root"` - Graffiti []byte `db:"graffiti"` - GraffitiText string `db:"graffiti_text"` - AttestationCount uint64 `db:"attestation_count"` - DepositCount uint64 `db:"deposit_count"` - ExitCount uint64 `db:"exit_count"` - WithdrawCount uint64 `db:"withdraw_count"` - WithdrawAmount uint64 `db:"withdraw_amount"` - AttesterSlashingCount uint64 `db:"attester_slashing_count"` - ProposerSlashingCount uint64 `db:"proposer_slashing_count"` - BLSChangeCount uint64 `db:"bls_change_count"` - EthTransactionCount uint64 `db:"eth_transaction_count"` - BlobCount uint64 `db:"blob_count"` 
- EthGasUsed uint64 `db:"eth_gas_used"` - EthGasLimit uint64 `db:"eth_gas_limit"` - EthBaseFee uint64 `db:"eth_base_fee"` - EthFeeRecipient []byte `db:"eth_fee_recipient"` - EthBlockNumber *uint64 `db:"eth_block_number"` - EthBlockHash []byte `db:"eth_block_hash"` - EthBlockExtra []byte `db:"eth_block_extra"` - EthBlockExtraText string `db:"eth_block_extra_text"` - SyncParticipation float32 `db:"sync_participation"` - ForkId uint64 `db:"fork_id"` - BlockSize uint64 `db:"block_size"` - RecvDelay int32 `db:"recv_delay"` - MinExecTime uint32 `db:"min_exec_time"` - MaxExecTime uint32 `db:"max_exec_time"` - ExecTimes []byte `db:"exec_times"` - BlockUid uint64 `db:"block_uid"` + Slot uint64 `db:"slot"` + Proposer uint64 `db:"proposer"` + Status SlotStatus `db:"status"` + Root []byte `db:"root"` + ParentRoot []byte `db:"parent_root"` + StateRoot []byte `db:"state_root"` + Graffiti []byte `db:"graffiti"` + GraffitiText string `db:"graffiti_text"` + AttestationCount uint64 `db:"attestation_count"` + DepositCount uint64 `db:"deposit_count"` + ExitCount uint64 `db:"exit_count"` + WithdrawCount uint64 `db:"withdraw_count"` + WithdrawAmount uint64 `db:"withdraw_amount"` + AttesterSlashingCount uint64 `db:"attester_slashing_count"` + ProposerSlashingCount uint64 `db:"proposer_slashing_count"` + BLSChangeCount uint64 `db:"bls_change_count"` + EthTransactionCount uint64 `db:"eth_transaction_count"` + BlobCount uint64 `db:"blob_count"` + EthGasUsed uint64 `db:"eth_gas_used"` + EthGasLimit uint64 `db:"eth_gas_limit"` + EthBaseFee uint64 `db:"eth_base_fee"` + EthFeeRecipient []byte `db:"eth_fee_recipient"` + EthBlockNumber *uint64 `db:"eth_block_number"` + EthBlockHash []byte `db:"eth_block_hash"` + EthBlockParentHash []byte `db:"eth_block_parent_hash"` + EthBlockExtra []byte `db:"eth_block_extra"` + EthBlockExtraText string `db:"eth_block_extra_text"` + SyncParticipation float32 `db:"sync_participation"` + ForkId uint64 `db:"fork_id"` + BlockSize uint64 `db:"block_size"` + RecvDelay 
int32 `db:"recv_delay"` + MinExecTime uint32 `db:"min_exec_time"` + MaxExecTime uint32 `db:"max_exec_time"` + ExecTimes []byte `db:"exec_times"` + PayloadStatus PayloadStatus `db:"payload_status"` + BlockUid uint64 `db:"block_uid"` + BuilderIndex int64 `db:"builder_index"` // Builder index, -1 for self-built blocks (MaxUint64) } type Epoch struct { @@ -84,15 +95,18 @@ type Epoch struct { EthGasUsed uint64 `db:"eth_gas_used"` EthGasLimit uint64 `db:"eth_gas_limit"` SyncParticipation float32 `db:"sync_participation"` + PayloadCount uint64 `db:"payload_count"` } type OrphanedBlock struct { - Root []byte `db:"root"` - HeaderVer uint64 `db:"header_ver"` - HeaderSSZ []byte `db:"header_ssz"` - BlockVer uint64 `db:"block_ver"` - BlockSSZ []byte `db:"block_ssz"` - BlockUid uint64 `db:"block_uid"` + Root []byte `db:"root"` + HeaderVer uint64 `db:"header_ver"` + HeaderSSZ []byte `db:"header_ssz"` + BlockVer uint64 `db:"block_ver"` + BlockSSZ []byte `db:"block_ssz"` + PayloadVer uint64 `db:"payload_ver"` + PayloadSSZ []byte `db:"payload_ssz"` + BlockUid uint64 `db:"block_uid"` } type SlotAssignment struct { @@ -121,6 +135,8 @@ type UnfinalizedBlock struct { HeaderSSZ []byte `db:"header_ssz"` BlockVer uint64 `db:"block_ver"` BlockSSZ []byte `db:"block_ssz"` + PayloadVer uint64 `db:"payload_ver"` + PayloadSSZ []byte `db:"payload_ssz"` Status UnfinalizedBlockStatus `db:"status"` ForkId uint64 `db:"fork_id"` RecvDelay int32 `db:"recv_delay"` @@ -156,6 +172,7 @@ type UnfinalizedEpoch struct { EthGasUsed uint64 `db:"eth_gas_used"` EthGasLimit uint64 `db:"eth_gas_limit"` SyncParticipation float32 `db:"sync_participation"` + PayloadCount uint64 `db:"payload_count"` } type OrphanedEpoch struct { @@ -548,6 +565,30 @@ type ElTokenTransfer struct { AmountRaw []byte `db:"amount_raw"` } +// ePBS types + +type BlockBid struct { + ParentRoot []byte `db:"parent_root"` + ParentHash []byte `db:"parent_hash"` + BlockHash []byte `db:"block_hash"` + FeeRecipient []byte `db:"fee_recipient"` + 
GasLimit uint64 `db:"gas_limit"` + BuilderIndex int64 `db:"builder_index"` + Slot uint64 `db:"slot"` + Value uint64 `db:"value"` + ElPayment uint64 `db:"el_payment"` +} + +type Builder struct { + Pubkey []byte `db:"pubkey"` + BuilderIndex uint64 `db:"builder_index"` + Version uint8 `db:"version"` + ExecutionAddress []byte `db:"execution_address"` + DepositEpoch int64 `db:"deposit_epoch"` + WithdrawableEpoch int64 `db:"withdrawable_epoch"` + Superseded bool `db:"superseded"` +} + // Withdrawal types const ( WithdrawalTypeBeaconWithdrawal = 0 diff --git a/dbtypes/other.go b/dbtypes/other.go index 7936d6aec..27741606c 100644 --- a/dbtypes/other.go +++ b/dbtypes/other.go @@ -53,6 +53,7 @@ type BlockFilter struct { InvertProposer bool WithOrphaned uint8 WithMissing uint8 + WithPayloadOrphaned uint8 // 0: only canonical payloads, 1: all, 2: only orphaned payloads MinSyncParticipation *float32 MaxSyncParticipation *float32 MinExecTime *uint32 @@ -67,6 +68,8 @@ type BlockFilter struct { ForkIds []uint64 // Filter by fork IDs EthBlockNumber *uint64 // Filter by EL block number EthBlockHash []byte // Filter by EL block hash + EthBlockParentHash []byte // Filter by EL block parent hash + BuilderIndex *int64 // Filter by builder index (-1 for self-built blocks) MinGasUsed *uint64 // Filter by minimum gas used MaxGasUsed *uint64 // Filter by maximum gas used MinGasLimit *uint64 // Filter by minimum gas limit @@ -222,6 +225,43 @@ type ValidatorFilter struct { Offset uint64 } +// Builder filter types + +type BuilderOrder uint8 + +const ( + BuilderOrderIndexAsc BuilderOrder = iota + BuilderOrderIndexDesc + BuilderOrderPubKeyAsc + BuilderOrderPubKeyDesc + BuilderOrderBalanceAsc + BuilderOrderBalanceDesc + BuilderOrderDepositEpochAsc + BuilderOrderDepositEpochDesc + BuilderOrderWithdrawableEpochAsc + BuilderOrderWithdrawableEpochDesc +) + +type BuilderStatus uint8 + +const ( + BuilderStatusActiveFilter BuilderStatus = iota + BuilderStatusExitedFilter + BuilderStatusSupersededFilter 
+) + +type BuilderFilter struct { + MinIndex *uint64 + MaxIndex *uint64 + PubKey []byte + ExecutionAddress []byte + Status []BuilderStatus + + OrderBy BuilderOrder + Limit uint64 + Offset uint64 +} + // EL Explorer filters type ElTransactionFilter struct { diff --git a/go.mod b/go.mod index b0b1996c6..155394d18 100644 --- a/go.mod +++ b/go.mod @@ -255,3 +255,5 @@ require ( modernc.org/memory v1.11.0 // indirect modernc.org/sqlite v1.46.1 // indirect ) + +replace github.com/attestantio/go-eth2-client => github.com/pk910/go-eth2-client v0.0.0-20260225144847-75b86704f554 diff --git a/go.sum b/go.sum index e2434c554..91bb96335 100644 --- a/go.sum +++ b/go.sum @@ -37,8 +37,6 @@ github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0L github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/attestantio/go-eth2-client v0.28.0 h1:2zIIIMPvSD+g6h3TgVXsoda/Yw3e+wjo1e8CZEanORU= -github.com/attestantio/go-eth2-client v0.28.0/go.mod h1:PO9sHFCq+1RiG+Eh3eOR2GYvYV64Qzg7idM3kLgCs5k= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -565,6 +563,8 @@ github.com/pion/webrtc/v4 v4.1.4 h1:/gK1ACGHXQmtyVVbJFQDxNoODg4eSRiFLB7t9r9pg8M= github.com/pion/webrtc/v4 v4.1.4/go.mod h1:Oab9npu1iZtQRMic3K3toYq5zFPvToe/QBw7dMI2ok4= github.com/pk910/dynamic-ssz v1.2.1 h1:84eNMiiOYDiNC2Y1m5A/UtIPs6u/9SsvG4RVSBRGE5U= github.com/pk910/dynamic-ssz v1.2.1/go.mod h1:HXRWLNcgj3DL65Kznrb+RdL3DEKw2JBZ/6crooqGoII= +github.com/pk910/go-eth2-client 
v0.0.0-20260225144847-75b86704f554 h1:FmusNWzB2XDzRQK1OoLo9XUH/PBatWPkztJOX/Ther4= +github.com/pk910/go-eth2-client v0.0.0-20260225144847-75b86704f554/go.mod h1:8fpxrIBBVbOcVG3vcHe5ubOHIeqW3N5t7kS4oU5EeJU= github.com/pk910/hashtree-bindings v0.0.1 h1:Sw+UlPlrBle4LUg04kqLFybVQcfmamwKL1QsrR3GU0g= github.com/pk910/hashtree-bindings v0.0.1/go.mod h1:eayIpxMFkWzMsydESu/5bV8wglZzSE/c9mq6DQdn204= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= diff --git a/handlers/api/network_forks_v1.go b/handlers/api/network_forks_v1.go index 7a2c82299..0ddfcce33 100644 --- a/handlers/api/network_forks_v1.go +++ b/handlers/api/network_forks_v1.go @@ -112,7 +112,8 @@ func buildNetworkForks(chainState *consensus.ChainState) []*APINetworkForkInfo { // Helper function to add consensus fork addConsensusFork := func(name string, forkEpoch *uint64, forkVersion phase0.Version) { if forkEpoch != nil && *forkEpoch < uint64(18446744073709551615) { - forkDigest := chainState.GetForkDigest(forkVersion, nil) + blobParams := chainState.GetBlobScheduleForEpoch(phase0.Epoch(*forkEpoch)) + forkDigest := chainState.GetForkDigest(forkVersion, blobParams) version := fmt.Sprintf("0x%x", forkVersion) epoch := *forkEpoch forks = append(forks, &APINetworkForkInfo{ @@ -135,6 +136,7 @@ func buildNetworkForks(chainState *consensus.ChainState) []*APINetworkForkInfo { addConsensusFork("Deneb", specs.DenebForkEpoch, specs.DenebForkVersion) addConsensusFork("Electra", specs.ElectraForkEpoch, specs.ElectraForkVersion) addConsensusFork("Fulu", specs.FuluForkEpoch, specs.FuluForkVersion) + addConsensusFork("Gloas", specs.GloasForkEpoch, specs.GloasForkVersion) // Add BPO forks from BLOB_SCHEDULE for i, blobSchedule := range specs.BlobSchedule { diff --git a/handlers/builder.go b/handlers/builder.go new file mode 100644 index 000000000..19feb83fd --- /dev/null +++ b/handlers/builder.go @@ -0,0 +1,318 @@ +package handlers + +import ( + "context" + 
"encoding/hex" + "errors" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/attestantio/go-eth2-client/spec/gloas" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/gorilla/mux" + "github.com/sirupsen/logrus" + + "github.com/ethpandaops/dora/clients/consensus" + "github.com/ethpandaops/dora/db" + "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/dora/indexer/beacon" + "github.com/ethpandaops/dora/services" + "github.com/ethpandaops/dora/templates" + "github.com/ethpandaops/dora/types/models" +) + +// BuilderDetail will return the main "builder" page using a go template +func BuilderDetail(w http.ResponseWriter, r *http.Request) { + var builderTemplateFiles = append(layoutTemplateFiles, + "builder/builder.html", + "builder/recentBlocks.html", + "builder/recentBids.html", + "builder/recentDeposits.html", + "_svg/timeline.html", + ) + var notfoundTemplateFiles = append(layoutTemplateFiles, + "builder/notfound.html", + ) + + var pageTemplate = templates.GetTemplate(builderTemplateFiles...) 
+ data := InitPageData(w, r, "builders", "/builder", "Builder", builderTemplateFiles) + + var builder *gloas.Builder + var builderIndex uint64 + var superseded bool + + vars := mux.Vars(r) + idxOrPubKey := strings.Replace(vars["idxOrPubKey"], "0x", "", -1) + builderPubKey, err := hex.DecodeString(idxOrPubKey) + if err != nil || len(builderPubKey) != 48 { + // search by index + idx, err := strconv.ParseUint(vars["idxOrPubKey"], 10, 64) + if err == nil { + builderIndex = idx + builder = services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(idx)) + if builder == nil { + // Try from DB + dbBuilder := db.GetActiveBuilderByIndex(r.Context(), idx) + if dbBuilder != nil { + builder = beacon.UnwrapDbBuilder(dbBuilder) + superseded = dbBuilder.Superseded + } + } + } + } else { + // search by pubkey + dbBuilder := db.GetBuilderByPubkey(r.Context(), builderPubKey) + if dbBuilder != nil { + builderIndex = dbBuilder.BuilderIndex + superseded = dbBuilder.Superseded + builder = services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(dbBuilder.BuilderIndex)) + if builder == nil { + builder = beacon.UnwrapDbBuilder(dbBuilder) + } + } + } + + if builder == nil { + data := InitPageData(w, r, "builders", "/builder", "Builder not found", notfoundTemplateFiles) + w.Header().Set("Content-Type", "text/html") + handleTemplateError(w, r, "builder.go", "BuilderDetail", "", templates.GetTemplate(notfoundTemplateFiles...).ExecuteTemplate(w, "layout", data)) + return + } + + tabView := "blocks" + if r.URL.Query().Has("v") { + tabView = r.URL.Query().Get("v") + } + + var pageError error + pageError = services.GlobalCallRateLimiter.CheckCallLimit(r, 1) + if pageError == nil { + data.Data, pageError = getBuilderPageData(builderIndex, superseded, tabView) + } + if data.Data == nil { + pageError = errors.New("builder not found") + } + if pageError != nil { + handlePageError(w, r, pageError) + return + } + w.Header().Set("Content-Type", "text/html") + + if 
r.URL.Query().Has("lazy") { + // return the selected tab content only (lazy loaded) + handleTemplateError(w, r, "builder.go", "BuilderDetail", "", pageTemplate.ExecuteTemplate(w, "lazyPage", data.Data)) + } else { + handleTemplateError(w, r, "builder.go", "BuilderDetail", "", pageTemplate.ExecuteTemplate(w, "layout", data)) + } +} + +func getBuilderPageData(builderIndex uint64, superseded bool, tabView string) (*models.BuilderPageData, error) { + pageData := &models.BuilderPageData{} + pageCacheKey := fmt.Sprintf("builder:%v:%v", builderIndex, tabView) + pageRes, pageErr := services.GlobalFrontendCache.ProcessCachedPage(pageCacheKey, true, pageData, func(pageCall *services.FrontendCacheProcessingPage) interface{} { + pageData, cacheTimeout := buildBuilderPageData(pageCall.CallCtx, builderIndex, superseded, tabView) + pageCall.CacheTimeout = cacheTimeout + return pageData + }) + if pageErr == nil && pageRes != nil { + resData, resOk := pageRes.(*models.BuilderPageData) + if !resOk { + return nil, ErrInvalidPageModel + } + pageData = resData + } + return pageData, pageErr +} + +func buildBuilderPageData(ctx context.Context, builderIndex uint64, superseded bool, tabView string) (*models.BuilderPageData, time.Duration) { + logrus.Debugf("builder page called: %v", builderIndex) + + chainState := services.GlobalBeaconService.GetChainState() + specs := chainState.GetSpecs() + currentEpoch := chainState.CurrentEpoch() + + // Get builder data + builder := services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex)) + if builder == nil { + // Try from DB + dbBuilder := db.GetActiveBuilderByIndex(ctx, builderIndex) + if dbBuilder != nil { + builder = beacon.UnwrapDbBuilder(dbBuilder) + superseded = dbBuilder.Superseded + } + } + if builder == nil { + return nil, 0 + } + + // Determine state + state := "Active" + if superseded { + state = "Superseded" + } else if builder.WithdrawableEpoch <= currentEpoch { + state = "Exited" + } + + pageData := 
&models.BuilderPageData{ + CurrentEpoch: uint64(currentEpoch), + Index: builderIndex, + Name: services.GlobalBeaconService.GetValidatorName(builderIndex | services.BuilderIndexFlag), + PublicKey: builder.PublicKey[:], + Balance: uint64(builder.Balance), + ExecutionAddress: builder.ExecutionAddress[:], + Version: builder.Version, + State: state, + IsSuperseded: superseded, + TabView: tabView, + GloasIsActive: specs.GloasForkEpoch != nil && uint64(currentEpoch) >= *specs.GloasForkEpoch, + } + + // Deposit epoch + if builder.DepositEpoch < 18446744073709551615 { + pageData.ShowDeposit = true + pageData.DepositEpoch = uint64(builder.DepositEpoch) + pageData.DepositTs = chainState.EpochToTime(builder.DepositEpoch) + } + + // Withdrawable epoch + if builder.WithdrawableEpoch < 18446744073709551615 { + pageData.ShowWithdrawable = true + pageData.WithdrawableEpoch = uint64(builder.WithdrawableEpoch) + pageData.WithdrawableTs = chainState.EpochToTime(builder.WithdrawableEpoch) + } + + // Load tab-specific data + switch tabView { + case "blocks": + pageData.RecentBlocks = buildBuilderRecentBlocks(ctx, builderIndex, chainState) + case "bids": + pageData.RecentBids = buildBuilderRecentBids(ctx, builderIndex, chainState) + case "deposits": + pageData.RecentDeposits = buildBuilderRecentDeposits(ctx, builderIndex, chainState) + } + + return pageData, 10 * time.Minute +} + +func buildBuilderRecentBlocks(ctx context.Context, builderIndex uint64, chainState *consensus.ChainState) []*models.BuilderPageDataBlock { + // Filter blocks by builder index using the new DB filter + builderIndexInt64 := int64(builderIndex) + filter := &dbtypes.BlockFilter{ + BuilderIndex: &builderIndexInt64, + WithOrphaned: 1, // Include both canonical and orphaned + WithMissing: 0, // Exclude missing blocks + } + + // Get blocks built by this builder + dbBlocks := services.GlobalBeaconService.GetDbBlocksByFilter(ctx, filter, 0, 20, 0) + + // Collect block hashes for batch bid lookup + blockHashes := 
make([][]byte, 0, len(dbBlocks)) + validBlocks := make([]*dbtypes.Slot, 0, len(dbBlocks)) + + for _, assignedSlot := range dbBlocks { + if assignedSlot.Block == nil { + continue + } + slot := assignedSlot.Block + + // Only include blocks with actual payloads + if slot.PayloadStatus != dbtypes.PayloadStatusCanonical && slot.PayloadStatus != dbtypes.PayloadStatusOrphaned { + continue + } + + if len(slot.EthBlockHash) > 0 { + blockHashes = append(blockHashes, slot.EthBlockHash) + validBlocks = append(validBlocks, slot) + } + } + + // Batch fetch all bids for these block hashes + bidsMap := db.GetBidsByBlockHashes(ctx, blockHashes, builderIndex) + + // Build result + blocks := make([]*models.BuilderPageDataBlock, 0, len(validBlocks)) + for _, slot := range validBlocks { + block := &models.BuilderPageDataBlock{ + Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot.Slot))), + Slot: slot.Slot, + Ts: chainState.SlotToTime(phase0.Slot(slot.Slot)), + BlockRoot: slot.Root, + BlockHash: slot.EthBlockHash, + Status: uint16(slot.PayloadStatus), + FeeRecipient: slot.EthFeeRecipient, + GasLimit: slot.EthGasLimit, + } + + // Look up bid info for Value and ElPayment from the batch result + blockHashKey := fmt.Sprintf("%x", slot.EthBlockHash) + if bid, ok := bidsMap[blockHashKey]; ok { + block.Value = bid.Value + block.ElPayment = bid.ElPayment + } + + blocks = append(blocks, block) + } + + return blocks +} + +func buildBuilderRecentBids(ctx context.Context, builderIndex uint64, chainState *consensus.ChainState) []*models.BuilderPageDataBid { + bids, _ := db.GetBidsByBuilderIndex(ctx, builderIndex, 0, 20) + + result := make([]*models.BuilderPageDataBid, 0, len(bids)) + for _, bid := range bids { + bidData := &models.BuilderPageDataBid{ + Slot: bid.Slot, + Ts: chainState.SlotToTime(phase0.Slot(bid.Slot)), + ParentRoot: bid.ParentRoot, + ParentHash: bid.ParentHash, + BlockHash: bid.BlockHash, + FeeRecipient: bid.FeeRecipient, + GasLimit: bid.GasLimit, + Value: bid.Value, + ElPayment: 
bid.ElPayment, + IsWinning: false, + } + + // Check if this bid won (payload was included) + slots := db.GetSlotsByBlockHash(ctx, bid.BlockHash) + for _, slot := range slots { + if slot.PayloadStatus == dbtypes.PayloadStatusCanonical { + bidData.IsWinning = true + break + } + } + + result = append(result, bidData) + } + + return result +} + +func buildBuilderRecentDeposits(ctx context.Context, builderIndex uint64, chainState *consensus.ChainState) []*models.BuilderPageDataDeposit { + // Builder exits are tracked as voluntary exits with BuilderIndexFlag set + builderIndexWithFlag := builderIndex | services.BuilderIndexFlag + filter := &dbtypes.VoluntaryExitFilter{ + MinIndex: builderIndexWithFlag, + MaxIndex: builderIndexWithFlag, + } + + exits, _ := services.GlobalBeaconService.GetVoluntaryExitsByFilter(ctx, filter, 0, 20) + + result := make([]*models.BuilderPageDataDeposit, 0, len(exits)) + for _, exit := range exits { + result = append(result, &models.BuilderPageDataDeposit{ + Type: "exit", + SlotNumber: exit.SlotNumber, + SlotRoot: exit.SlotRoot, + Time: chainState.SlotToTime(phase0.Slot(exit.SlotNumber)), + Orphaned: exit.Orphaned, + }) + } + + return result +} diff --git a/handlers/builders.go b/handlers/builders.go new file mode 100644 index 000000000..b1ae7a563 --- /dev/null +++ b/handlers/builders.go @@ -0,0 +1,295 @@ +package handlers + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/dora/services" + "github.com/ethpandaops/dora/templates" + "github.com/ethpandaops/dora/types/models" + "github.com/sirupsen/logrus" +) + +// Builders will return the main "builders" page using a go template +func Builders(w http.ResponseWriter, r *http.Request) { + var buildersTemplateFiles = append(layoutTemplateFiles, + "builders/builders.html", + "_svg/professor.html", + ) + + var pageTemplate = 
templates.GetTemplate(buildersTemplateFiles...) + data := InitPageData(w, r, "builders", "/builders", "Builders", buildersTemplateFiles) + + urlArgs := r.URL.Query() + var pageNumber uint64 = 1 + if urlArgs.Has("p") { + pageNumber, _ = strconv.ParseUint(urlArgs.Get("p"), 10, 64) + } + var pageSize uint64 = 50 + if urlArgs.Has("c") { + pageSize, _ = strconv.ParseUint(urlArgs.Get("c"), 10, 64) + } + if urlArgs.Has("json") && pageSize > 10000 { + pageSize = 10000 + } else if !urlArgs.Has("json") && pageSize > 1000 { + pageSize = 1000 + } + + var filterPubKey string + var filterIndex string + var filterExecutionAddr string + var filterStatus string + if urlArgs.Has("f") { + if urlArgs.Has("f.pubkey") { + filterPubKey = urlArgs.Get("f.pubkey") + } + if urlArgs.Has("f.index") { + filterIndex = urlArgs.Get("f.index") + } + if urlArgs.Has("f.execution_addr") { + filterExecutionAddr = urlArgs.Get("f.execution_addr") + } + if urlArgs.Has("f.status") { + filterStatus = strings.Join(urlArgs["f.status"], ",") + } + } + var sortOrder string + if urlArgs.Has("o") { + sortOrder = urlArgs.Get("o") + } + + var pageError error + pageError = services.GlobalCallRateLimiter.CheckCallLimit(r, 1) + if pageError == nil { + data.Data, pageError = getBuildersPageData(pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus) + } + if pageError != nil { + handlePageError(w, r, pageError) + return + } + + if urlArgs.Has("json") { + w.Header().Set("Content-Type", "application/json") + err := json.NewEncoder(w).Encode(data.Data) + if err != nil { + logrus.WithError(err).Error("error encoding builders data") + http.Error(w, "Internal server error", http.StatusServiceUnavailable) + } + return + } + + w.Header().Set("Content-Type", "text/html") + if handleTemplateError(w, r, "builders.go", "Builders", "", pageTemplate.ExecuteTemplate(w, "layout", data)) != nil { + return // an error has occurred and was processed + } +} + +func getBuildersPageData(pageNumber 
uint64, pageSize uint64, sortOrder string, filterPubKey string, filterIndex string, filterExecutionAddr string, filterStatus string) (*models.BuildersPageData, error) { + pageData := &models.BuildersPageData{} + pageCacheKey := fmt.Sprintf("builders:%v:%v:%v:%v:%v:%v:%v", pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus) + pageRes, pageErr := services.GlobalFrontendCache.ProcessCachedPage(pageCacheKey, true, pageData, func(pageCall *services.FrontendCacheProcessingPage) interface{} { + pageData, cacheTimeout := buildBuildersPageData(pageCall.CallCtx, pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus) + pageCall.CacheTimeout = cacheTimeout + return pageData + }) + if pageErr == nil && pageRes != nil { + resData, resOk := pageRes.(*models.BuildersPageData) + if !resOk { + return nil, ErrInvalidPageModel + } + pageData = resData + } + return pageData, pageErr +} + +func buildBuildersPageData(ctx context.Context, pageNumber uint64, pageSize uint64, sortOrder string, filterPubKey string, filterIndex string, filterExecutionAddr string, filterStatus string) (*models.BuildersPageData, time.Duration) { + logrus.Debugf("builders page called: %v:%v:%v:%v:%v:%v:%v", pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus) + pageData := &models.BuildersPageData{} + cacheTime := 10 * time.Minute + + chainState := services.GlobalBeaconService.GetChainState() + + builderFilter := dbtypes.BuilderFilter{ + Limit: pageSize, + Offset: (pageNumber - 1) * pageSize, + } + + filterArgs := url.Values{} + if filterPubKey != "" || filterIndex != "" || filterExecutionAddr != "" || filterStatus != "" { + if filterPubKey != "" { + pageData.FilterPubKey = filterPubKey + filterArgs.Add("f.pubkey", filterPubKey) + filterPubKeyVal, _ := hex.DecodeString(strings.Replace(filterPubKey, "0x", "", -1)) + builderFilter.PubKey = filterPubKeyVal + } + if filterIndex != "" { + 
pageData.FilterIndex = filterIndex + filterArgs.Add("f.index", filterIndex) + filterIndexVal, _ := strconv.ParseUint(filterIndex, 10, 64) + builderFilter.MinIndex = &filterIndexVal + builderFilter.MaxIndex = &filterIndexVal + } + if filterExecutionAddr != "" { + pageData.FilterExecutionAddr = filterExecutionAddr + filterArgs.Add("f.execution_addr", filterExecutionAddr) + filterExecutionAddrVal, _ := hex.DecodeString(strings.Replace(filterExecutionAddr, "0x", "", -1)) + builderFilter.ExecutionAddress = filterExecutionAddrVal + } + if filterStatus != "" { + pageData.FilterStatus = filterStatus + filterArgs.Add("f.status", filterStatus) + filterStatusVal := strings.Split(filterStatus, ",") + builderFilter.Status = make([]dbtypes.BuilderStatus, 0, len(filterStatusVal)) + for _, status := range filterStatusVal { + switch status { + case "active": + builderFilter.Status = append(builderFilter.Status, dbtypes.BuilderStatusActiveFilter) + case "exited": + builderFilter.Status = append(builderFilter.Status, dbtypes.BuilderStatusExitedFilter) + case "superseded": + builderFilter.Status = append(builderFilter.Status, dbtypes.BuilderStatusSupersededFilter) + } + } + } + } + + // apply sort order + switch sortOrder { + case "index-d": + builderFilter.OrderBy = dbtypes.BuilderOrderIndexDesc + case "pubkey": + builderFilter.OrderBy = dbtypes.BuilderOrderPubKeyAsc + case "pubkey-d": + builderFilter.OrderBy = dbtypes.BuilderOrderPubKeyDesc + case "balance": + builderFilter.OrderBy = dbtypes.BuilderOrderBalanceAsc + case "balance-d": + builderFilter.OrderBy = dbtypes.BuilderOrderBalanceDesc + case "deposit": + builderFilter.OrderBy = dbtypes.BuilderOrderDepositEpochAsc + case "deposit-d": + builderFilter.OrderBy = dbtypes.BuilderOrderDepositEpochDesc + case "withdrawable": + builderFilter.OrderBy = dbtypes.BuilderOrderWithdrawableEpochAsc + case "withdrawable-d": + builderFilter.OrderBy = dbtypes.BuilderOrderWithdrawableEpochDesc + default: + builderFilter.OrderBy = 
dbtypes.BuilderOrderIndexAsc + pageData.IsDefaultSorting = true + sortOrder = "index" + } + pageData.Sorting = sortOrder + + // get latest builder set + builderSetRsp, builderSetLen := services.GlobalBeaconService.GetFilteredBuilderSet(ctx, &builderFilter, true) + if len(builderSetRsp) == 0 { + cacheTime = 5 * time.Minute + } + + currentEpoch := chainState.CurrentEpoch() + + // get status options + pageData.FilterStatusOpts = []models.BuildersPageDataStatusOption{ + {Status: "active", Count: 0}, + {Status: "exited", Count: 0}, + {Status: "superseded", Count: 0}, + } + + totalPages := builderSetLen / pageSize + if (builderSetLen % pageSize) > 0 { + totalPages++ + } + if pageNumber == 0 { + pageData.IsDefaultPage = true + } else if pageNumber >= totalPages { + if totalPages == 0 { + pageNumber = 0 + } else { + pageNumber = totalPages + } + } + + pageData.PageSize = pageSize + pageData.TotalPages = totalPages + pageData.CurrentPageIndex = pageNumber + if pageNumber > 1 { + pageData.PrevPageIndex = pageNumber - 1 + } + if pageNumber < totalPages { + pageData.NextPageIndex = pageNumber + 1 + } + if totalPages > 1 { + pageData.LastPageIndex = totalPages + } + + // get builders + pageData.Builders = make([]*models.BuildersPageDataBuilder, 0, len(builderSetRsp)) + + for _, builder := range builderSetRsp { + if builder.Builder == nil { + continue + } + + builderData := &models.BuildersPageDataBuilder{ + Index: uint64(builder.Index), + PublicKey: builder.Builder.PublicKey[:], + ExecutionAddress: builder.Builder.ExecutionAddress[:], + Balance: uint64(builder.Builder.Balance), + } + + // Determine state + if builder.Superseded { + builderData.State = "Superseded" + } else if builder.Builder.WithdrawableEpoch <= currentEpoch { + builderData.State = "Exited" + } else { + builderData.State = "Active" + } + + // Deposit epoch + if builder.Builder.DepositEpoch < 18446744073709551615 { + builderData.ShowDeposit = true + builderData.DepositEpoch = uint64(builder.Builder.DepositEpoch) 
+ builderData.DepositTs = chainState.EpochToTime(builder.Builder.DepositEpoch) + } + + // Withdrawable epoch + if builder.Builder.WithdrawableEpoch < 18446744073709551615 { + builderData.ShowWithdrawable = true + builderData.WithdrawableEpoch = uint64(builder.Builder.WithdrawableEpoch) + builderData.WithdrawableTs = chainState.EpochToTime(builder.Builder.WithdrawableEpoch) + } + + pageData.Builders = append(pageData.Builders, builderData) + } + pageData.BuilderCount = builderSetLen + pageData.FirstBuilder = pageNumber * pageSize + pageData.LastBuilder = pageData.FirstBuilder + uint64(len(pageData.Builders)) + + // Populate UrlParams for page jump functionality + pageData.UrlParams = make(map[string]string) + for key, values := range filterArgs { + if len(values) > 0 { + pageData.UrlParams[key] = values[0] + } + } + pageData.UrlParams["c"] = fmt.Sprintf("%v", pageData.PageSize) + + pageData.FilteredPageLink = fmt.Sprintf("/builders?f&%v&c=%v", filterArgs.Encode(), pageData.PageSize) + + // Sort status options alphabetically + sort.Slice(pageData.FilterStatusOpts, func(a, b int) bool { + return strings.Compare(pageData.FilterStatusOpts[a].Status, pageData.FilterStatusOpts[b].Status) < 0 + }) + + return pageData, cacheTime +} diff --git a/handlers/deposits.go b/handlers/deposits.go index 5c1d8f2f3..7c485aea2 100644 --- a/handlers/deposits.go +++ b/handlers/deposits.go @@ -155,6 +155,9 @@ func buildDepositsPageData(ctx context.Context, firstEpoch uint64, pageSize uint // load initiated deposits dbDepositTxs := db.GetDepositTxs(ctx, 0, 20) for _, depositTx := range dbDepositTxs { + // Check if this is a builder deposit (0x03 withdrawal credentials) + isBuilder := len(depositTx.WithdrawalCredentials) > 0 && depositTx.WithdrawalCredentials[0] == 0x03 + depositTxData := &models.DepositsPageDataInitiatedDeposit{ Index: depositTx.Index, Address: depositTx.TxSender, @@ -166,6 +169,7 @@ func buildDepositsPageData(ctx context.Context, firstEpoch uint64, pageSize uint Block: 
depositTx.BlockNumber, Orphaned: depositTx.Orphaned, Valid: depositTx.ValidSignature == 1 || depositTx.ValidSignature == 2, + IsBuilder: isBuilder, } validatorIndex, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(depositTx.PublicKey)) @@ -216,15 +220,20 @@ func buildDepositsPageData(ctx context.Context, firstEpoch uint64, pageSize uint dbDeposits, _ := services.GlobalBeaconService.GetDepositRequestsByFilter(ctx, depositFilter, 0, uint32(20)) for _, deposit := range dbDeposits { + // Check if this is a builder deposit (0x03 withdrawal credentials) + wdCreds := deposit.WithdrawalCredentials() + isBuilder := len(wdCreds) > 0 && wdCreds[0] == 0x03 + depositData := &models.DepositsPageDataIncludedDeposit{ PublicKey: deposit.PublicKey(), - Withdrawalcredentials: deposit.WithdrawalCredentials(), + Withdrawalcredentials: wdCreds, Amount: deposit.Amount(), Time: chainState.SlotToTime(phase0.Slot(deposit.Request.SlotNumber)), SlotNumber: deposit.Request.SlotNumber, SlotRoot: deposit.Request.SlotRoot, Orphaned: deposit.RequestOrphaned, DepositorAddress: deposit.SourceAddress(), + IsBuilder: isBuilder, } if deposit.IsQueued { @@ -314,12 +323,17 @@ func buildDepositsPageData(ctx context.Context, firstEpoch uint64, pageSize uint } for _, queueEntry := range queuedDeposits.Queue[:limit] { + // Check if this is a builder deposit (0x03 withdrawal credentials) + wdCreds := queueEntry.PendingDeposit.WithdrawalCredentials[:] + isBuilder := len(wdCreds) > 0 && wdCreds[0] == 0x03 + depositData := &models.DepositsPageDataQueuedDeposit{ QueuePosition: queueEntry.QueuePos, EstimatedTime: chainState.EpochToTime(queueEntry.EpochEstimate), PublicKey: queueEntry.PendingDeposit.Pubkey[:], - Withdrawalcredentials: queueEntry.PendingDeposit.WithdrawalCredentials[:], + Withdrawalcredentials: wdCreds, Amount: uint64(queueEntry.PendingDeposit.Amount), + IsBuilder: isBuilder, } if validatorIdx, found := 
services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(depositData.PublicKey)); !found { diff --git a/handlers/epoch.go b/handlers/epoch.go index 807975d47..c3ac1baf7 100644 --- a/handlers/epoch.go +++ b/handlers/epoch.go @@ -170,12 +170,18 @@ func buildEpochPageData(ctx context.Context, epoch uint64) (*models.EpochPageDat pageData.MissedCount++ } + payloadStatus := dbSlot.PayloadStatus + if !chainState.IsEip7732Enabled(phase0.Epoch(epoch)) { + payloadStatus = dbtypes.PayloadStatusCanonical + } + slotData := &models.EpochPageDataSlot{ Slot: slot, Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot))), Ts: chainState.SlotToTime(phase0.Slot(slot)), Scheduled: slot >= uint64(currentSlot) && dbSlot.Status == dbtypes.Missing, Status: uint8(dbSlot.Status), + PayloadStatus: uint8(payloadStatus), Proposer: dbSlot.Proposer, ProposerName: services.GlobalBeaconService.GetValidatorName(dbSlot.Proposer), AttestationCount: dbSlot.AttestationCount, diff --git a/handlers/exits.go b/handlers/exits.go index 534b4770a..82da09050 100644 --- a/handlers/exits.go +++ b/handlers/exits.go @@ -10,6 +10,7 @@ import ( "time" v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" @@ -159,43 +160,70 @@ func buildExitsPageData(ctx context.Context, firstEpoch uint64, pageSize uint64, dbVoluntaryExits, _ := services.GlobalBeaconService.GetVoluntaryExitsByFilter(ctx, voluntaryExitFilter, 0, uint32(20)) for _, voluntaryExit := range dbVoluntaryExits { exitData := &models.ExitsPageDataRecentExit{ - SlotNumber: voluntaryExit.SlotNumber, - SlotRoot: voluntaryExit.SlotRoot, - Time: chainState.SlotToTime(phase0.Slot(voluntaryExit.SlotNumber)), - Orphaned: voluntaryExit.Orphaned, - ValidatorIndex: voluntaryExit.ValidatorIndex, - ValidatorName: 
services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex), + SlotNumber: voluntaryExit.SlotNumber, + SlotRoot: voluntaryExit.SlotRoot, + Time: chainState.SlotToTime(phase0.Slot(voluntaryExit.SlotNumber)), + Orphaned: voluntaryExit.Orphaned, } - validator := services.GlobalBeaconService.GetValidatorByIndex(phase0.ValidatorIndex(voluntaryExit.ValidatorIndex), false) - if validator == nil { - exitData.ValidatorStatus = "Unknown" - } else { - exitData.PublicKey = validator.Validator.PublicKey[:] - exitData.WithdrawalCreds = validator.Validator.WithdrawalCredentials - - if strings.HasPrefix(validator.Status.String(), "pending") { - exitData.ValidatorStatus = "Pending" - } else if validator.Status == v1.ValidatorStateActiveOngoing { - exitData.ValidatorStatus = "Active" - exitData.ShowUpcheck = true - } else if validator.Status == v1.ValidatorStateActiveExiting { - exitData.ValidatorStatus = "Exiting" - exitData.ShowUpcheck = true - } else if validator.Status == v1.ValidatorStateActiveSlashed { - exitData.ValidatorStatus = "Slashed" - exitData.ShowUpcheck = true - } else if validator.Status == v1.ValidatorStateExitedUnslashed { - exitData.ValidatorStatus = "Exited" - } else if validator.Status == v1.ValidatorStateExitedSlashed { - exitData.ValidatorStatus = "Slashed" + // Check if this is a builder exit (validator index has BuilderIndexFlag set) + if voluntaryExit.ValidatorIndex&services.BuilderIndexFlag != 0 { + builderIndex := voluntaryExit.ValidatorIndex &^ services.BuilderIndexFlag + exitData.IsBuilder = true + exitData.ValidatorIndex = builderIndex + + // Resolve builder name via validatornames service (with BuilderIndexFlag) + exitData.ValidatorName = services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex) + + builder := services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex)) + if builder == nil { + exitData.ValidatorStatus = "Unknown" } else { - exitData.ValidatorStatus = validator.Status.String() + 
exitData.PublicKey = builder.PublicKey[:] + + // Determine builder status + currentEpoch := chainState.CurrentEpoch() + if builder.WithdrawableEpoch <= currentEpoch { + exitData.ValidatorStatus = "Exited" + } else { + exitData.ValidatorStatus = "Exiting" + } } + } else { + // Regular validator exit + exitData.ValidatorIndex = voluntaryExit.ValidatorIndex + exitData.ValidatorName = services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex) - if exitData.ShowUpcheck { - exitData.UpcheckActivity = uint8(services.GlobalBeaconService.GetValidatorLiveness(validator.Index, 3)) - exitData.UpcheckMaximum = uint8(3) + validator := services.GlobalBeaconService.GetValidatorByIndex(phase0.ValidatorIndex(voluntaryExit.ValidatorIndex), false) + if validator == nil { + exitData.ValidatorStatus = "Unknown" + } else { + exitData.PublicKey = validator.Validator.PublicKey[:] + exitData.WithdrawalCreds = validator.Validator.WithdrawalCredentials + + if strings.HasPrefix(validator.Status.String(), "pending") { + exitData.ValidatorStatus = "Pending" + } else if validator.Status == v1.ValidatorStateActiveOngoing { + exitData.ValidatorStatus = "Active" + exitData.ShowUpcheck = true + } else if validator.Status == v1.ValidatorStateActiveExiting { + exitData.ValidatorStatus = "Exiting" + exitData.ShowUpcheck = true + } else if validator.Status == v1.ValidatorStateActiveSlashed { + exitData.ValidatorStatus = "Slashed" + exitData.ShowUpcheck = true + } else if validator.Status == v1.ValidatorStateExitedUnslashed { + exitData.ValidatorStatus = "Exited" + } else if validator.Status == v1.ValidatorStateExitedSlashed { + exitData.ValidatorStatus = "Slashed" + } else { + exitData.ValidatorStatus = validator.Status.String() + } + + if exitData.ShowUpcheck { + exitData.UpcheckActivity = uint8(services.GlobalBeaconService.GetValidatorLiveness(validator.Index, 3)) + exitData.UpcheckMaximum = uint8(3) + } } } diff --git a/handlers/included_deposits.go b/handlers/included_deposits.go 
index bd5b34eef..8d0639751 100644 --- a/handlers/included_deposits.go +++ b/handlers/included_deposits.go @@ -195,15 +195,19 @@ func buildFilteredIncludedDepositsPageData(ctx context.Context, pageIdx uint64, chainState := services.GlobalBeaconService.GetChainState() for _, deposit := range dbDeposits { + wdCreds := deposit.WithdrawalCredentials() + isBuilder := len(wdCreds) > 0 && wdCreds[0] == 0x03 + depositData := &models.IncludedDepositsPageDataDeposit{ PublicKey: deposit.PublicKey(), - Withdrawalcredentials: deposit.WithdrawalCredentials(), + Withdrawalcredentials: wdCreds, Amount: deposit.Amount(), Time: chainState.SlotToTime(phase0.Slot(deposit.Request.SlotNumber)), SlotNumber: deposit.Request.SlotNumber, SlotRoot: deposit.Request.SlotRoot, Orphaned: deposit.RequestOrphaned, DepositorAddress: deposit.SourceAddress(), + IsBuilder: isBuilder, } if deposit.Request != nil { diff --git a/handlers/index.go b/handlers/index.go index 94f150602..9443611ae 100644 --- a/handlers/index.go +++ b/handlers/index.go @@ -290,6 +290,19 @@ func buildIndexPageData(ctx context.Context) (*models.IndexPageData, time.Durati ForkDigest: forkDigest[:], }) } + if specs.GloasForkEpoch != nil && *specs.GloasForkEpoch < uint64(18446744073709551615) { + blobParams := chainState.GetBlobScheduleForEpoch(phase0.Epoch(*specs.GloasForkEpoch)) + forkDigest := chainState.GetForkDigest(specs.GloasForkVersion, blobParams) + pageData.NetworkForks = append(pageData.NetworkForks, &models.IndexPageDataForks{ + Name: "Gloas", + Epoch: *specs.GloasForkEpoch, + Version: specs.GloasForkVersion[:], + Time: uint64(chainState.EpochToTime(phase0.Epoch(*specs.GloasForkEpoch)).Unix()), + Active: uint64(currentEpoch) >= *specs.GloasForkEpoch, + Type: "consensus", + ForkDigest: forkDigest[:], + }) + } // Add BPO forks from BLOB_SCHEDULE elBlobSchedule := services.GlobalBeaconService.GetExecutionChainState().GetFullBlobSchedule() @@ -425,14 +438,23 @@ func buildIndexPageRecentBlocksData(ctx context.Context, 
pageData *models.IndexP if blockData == nil { continue } + + epoch := chainState.EpochOfSlot(phase0.Slot(blockData.Slot)) + + payloadStatus := blockData.PayloadStatus + if !chainState.IsEip7732Enabled(epoch) { + payloadStatus = dbtypes.PayloadStatusCanonical + } + blockModel := &models.IndexPageDataBlocks{ - Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(blockData.Slot))), - Slot: blockData.Slot, - Ts: chainState.SlotToTime(phase0.Slot(blockData.Slot)), - Proposer: blockData.Proposer, - ProposerName: services.GlobalBeaconService.GetValidatorName(blockData.Proposer), - Status: uint64(blockData.Status), - BlockRoot: blockData.Root, + Epoch: uint64(epoch), + Slot: blockData.Slot, + Ts: chainState.SlotToTime(phase0.Slot(blockData.Slot)), + Proposer: blockData.Proposer, + ProposerName: services.GlobalBeaconService.GetValidatorName(blockData.Proposer), + Status: uint64(blockData.Status), + PayloadStatus: uint8(payloadStatus), + BlockRoot: blockData.Root, } if blockData.EthBlockNumber != nil { blockModel.WithEthBlock = true @@ -470,16 +492,24 @@ func buildIndexPageRecentSlotsData(ctx context.Context, pageData *models.IndexPa dbSlot := dbSlots[dbIdx] dbIdx++ + epoch := chainState.EpochOfSlot(phase0.Slot(dbSlot.Slot)) + + payloadStatus := dbSlot.PayloadStatus + if !chainState.IsEip7732Enabled(phase0.Epoch(epoch)) { + payloadStatus = dbtypes.PayloadStatusCanonical + } + slotData := &models.IndexPageDataSlots{ - Slot: slot, - Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(dbSlot.Slot))), - Ts: chainState.SlotToTime(phase0.Slot(slot)), - Status: uint64(dbSlot.Status), - Proposer: dbSlot.Proposer, - ProposerName: services.GlobalBeaconService.GetValidatorName(dbSlot.Proposer), - BlockRoot: dbSlot.Root, - ParentRoot: dbSlot.ParentRoot, - ForkGraph: make([]*models.IndexPageDataForkGraph, 0), + Slot: slot, + Epoch: uint64(epoch), + Ts: chainState.SlotToTime(phase0.Slot(slot)), + Status: uint64(dbSlot.Status), + PayloadStatus: uint8(payloadStatus), + Proposer: dbSlot.Proposer, + 
ProposerName: services.GlobalBeaconService.GetValidatorName(dbSlot.Proposer), + BlockRoot: dbSlot.Root, + ParentRoot: dbSlot.ParentRoot, + ForkGraph: make([]*models.IndexPageDataForkGraph, 0), } pageData.RecentSlots = append(pageData.RecentSlots, slotData) blockCount++ diff --git a/handlers/initiated_deposits.go b/handlers/initiated_deposits.go index 61b50bcca..b9f93a814 100644 --- a/handlers/initiated_deposits.go +++ b/handlers/initiated_deposits.go @@ -176,6 +176,8 @@ func buildFilteredInitiatedDepositsPageData(ctx context.Context, pageIdx uint64, } for _, depositTx := range dbDepositTxs { + isBuilder := len(depositTx.WithdrawalCredentials) > 0 && depositTx.WithdrawalCredentials[0] == 0x03 + depositTxData := &models.InitiatedDepositsPageDataDeposit{ Index: depositTx.Index, Address: depositTx.TxSender, @@ -188,6 +190,7 @@ func buildFilteredInitiatedDepositsPageData(ctx context.Context, pageIdx uint64, Orphaned: depositTx.Orphaned, Valid: depositTx.ValidSignature == 1 || depositTx.ValidSignature == 2, ValidatorStatus: "", + IsBuilder: isBuilder, } if validatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(depositTx.PublicKey)); !found { diff --git a/handlers/pageData.go b/handlers/pageData.go index 2dc6b1717..03546fb02 100644 --- a/handlers/pageData.go +++ b/handlers/pageData.go @@ -90,6 +90,8 @@ func InitPageData(w http.ResponseWriter, r *http.Request, active, path, title st } func createMenuItems(active string) []types.MainMenuItem { + chainState := services.GlobalBeaconService.GetChainState() + specs := chainState.GetSpecs() hiddenFor := []string{"confirmation", "login", "register"} if utils.SliceContains(hiddenFor, active) { @@ -203,6 +205,20 @@ func createMenuItems(active string) []types.MainMenuItem { validatorMenu = append(validatorMenu, types.NavigationGroup{ Links: validatorMenuLinks, }) + + if specs != nil && specs.GloasForkEpoch != nil && uint64(chainState.CurrentEpoch()) >= *specs.GloasForkEpoch { + builderMenu := 
[]types.NavigationLink{ + { + Label: "Builders", + Path: "/builders", + Icon: "fa-building", + }, + } + validatorMenu = append(validatorMenu, types.NavigationGroup{ + Links: builderMenu, + }) + } + validatorMenu = append(validatorMenu, types.NavigationGroup{ Links: []types.NavigationLink{ { @@ -223,8 +239,6 @@ func createMenuItems(active string) []types.MainMenuItem { }, }) - chainState := services.GlobalBeaconService.GetChainState() - specs := chainState.GetSpecs() if specs != nil && specs.ElectraForkEpoch != nil && uint64(chainState.CurrentEpoch()) >= *specs.ElectraForkEpoch { validatorMenu = append(validatorMenu, types.NavigationGroup{ Links: []types.NavigationLink{ diff --git a/handlers/queued_deposits.go b/handlers/queued_deposits.go index 471bb4c86..cb2e23739 100644 --- a/handlers/queued_deposits.go +++ b/handlers/queued_deposits.go @@ -207,12 +207,16 @@ func buildQueuedDepositsPageData(ctx context.Context, pageIdx uint64, pageSize u for i := start; i < end; i++ { queueEntry := filteredQueue[i] + wdCreds := queueEntry.PendingDeposit.WithdrawalCredentials[:] + isBuilder := len(wdCreds) > 0 && wdCreds[0] == 0x03 + depositData := &models.QueuedDepositsPageDataDeposit{ QueuePosition: queueEntry.QueuePos, EstimatedTime: chainState.EpochToTime(queueEntry.EpochEstimate), PublicKey: queueEntry.PendingDeposit.Pubkey[:], Amount: uint64(queueEntry.PendingDeposit.Amount), - Withdrawalcredentials: queueEntry.PendingDeposit.WithdrawalCredentials[:], + Withdrawalcredentials: wdCreds, + IsBuilder: isBuilder, } // Get validator status if exists diff --git a/handlers/search.go b/handlers/search.go index a0e9e7818..627d3a628 100644 --- a/handlers/search.go +++ b/handlers/search.go @@ -92,9 +92,9 @@ func buildSearchResolverResult(ctx context.Context, searchQuery string) (searchR } blockResult := &dbtypes.SearchBlockResult{} - err = db.ReaderDb.Get(blockResult, ` - SELECT slot, root, status - FROM slots + err = db.ReaderDb.GetContext(ctx, blockResult, ` + SELECT slot, root, 
status + FROM slots WHERE slot = $1 AND status != 0 LIMIT 1`, searchQuery) if err == nil { @@ -128,9 +128,9 @@ func buildSearchResolverResult(ctx context.Context, searchQuery string) (searchR blockHash, err := hex.DecodeString(hashQuery) if err == nil { blockResult := &dbtypes.SearchBlockResult{} - err = db.ReaderDb.Get(blockResult, ` - SELECT slot, root, orphaned - FROM slots + err = db.ReaderDb.GetContext(ctx, blockResult, ` + SELECT slot, root, orphaned + FROM slots WHERE root = $1 OR state_root = $1 LIMIT 1`, blockHash) @@ -149,7 +149,7 @@ func buildSearchResolverResult(ctx context.Context, searchQuery string) (searchR } names := &dbtypes.SearchNameResult{} - err = db.ReaderDb.Get(names, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = db.ReaderDb.GetContext(ctx, names, db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT name FROM validator_names @@ -166,7 +166,7 @@ func buildSearchResolverResult(ctx context.Context, searchQuery string) (searchR } graffiti := &dbtypes.SearchGraffitiResult{} - err = db.ReaderDb.Get(graffiti, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = db.ReaderDb.GetContext(ctx, graffiti, db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT graffiti FROM slots @@ -261,7 +261,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se switch searchType { case "epochs": dbres := &dbtypes.SearchAheadEpochsResult{} - err = db.ReaderDb.Select(dbres, "SELECT epoch FROM epochs WHERE CAST(epoch AS text) LIKE $1 ORDER BY epoch LIMIT 10", search+"%") + err = db.ReaderDb.SelectContext(ctx, dbres, "SELECT epoch FROM epochs WHERE CAST(epoch AS text) LIKE $1 ORDER BY epoch LIMIT 10", search+"%") if err == nil { model := make([]models.SearchAheadEpochsResult, len(*dbres)) for idx, entry := range *dbres { @@ -299,9 +299,9 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se } } else { dbres := &dbtypes.SearchAheadSlotsResult{} - err 
= db.ReaderDb.Select(dbres, ` - SELECT slot, root, status - FROM slots + err = db.ReaderDb.SelectContext(ctx, dbres, ` + SELECT slot, root, status + FROM slots WHERE slot < $1 AND (root = $2 OR state_root = $2) ORDER BY slot LIMIT 1`, minSlotIdx, blockHash) if err != nil { @@ -335,9 +335,9 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se result = res } else { dbres := &dbtypes.SearchAheadSlotsResult{} - err = db.ReaderDb.Select(dbres, ` - SELECT slot, root, status - FROM slots + err = db.ReaderDb.SelectContext(ctx, dbres, ` + SELECT slot, root, status + FROM slots WHERE slot = $1 AND status != 0 ORDER BY slot LIMIT 10`, blockNumber) if err == nil { @@ -383,9 +383,9 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se result = res } else { dbres := &dbtypes.SearchAheadExecBlocksResult{} - err = db.ReaderDb.Select(dbres, ` - SELECT slot, root, eth_block_hash, eth_block_number, status - FROM slots + err = db.ReaderDb.SelectContext(ctx, dbres, ` + SELECT slot, root, eth_block_hash, eth_block_number, status + FROM slots WHERE slot < $1 AND eth_block_hash = $2 ORDER BY slot LIMIT 10`, minSlotIdx, blockHash) if err != nil { @@ -422,9 +422,9 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se result = res } else { dbres := &dbtypes.SearchAheadExecBlocksResult{} - err = db.ReaderDb.Select(dbres, ` - SELECT slot, root, eth_block_hash, eth_block_number, status - FROM slots + err = db.ReaderDb.SelectContext(ctx, dbres, ` + SELECT slot, root, eth_block_hash, eth_block_number, status + FROM slots WHERE slot < $1 AND eth_block_number = $2 ORDER BY slot LIMIT 10`, minSlotIdx, blockNumber) if err == nil { @@ -444,7 +444,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se } case "graffiti": graffiti := &dbtypes.SearchAheadGraffitiResult{} - err = db.ReaderDb.Select(graffiti, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = 
db.ReaderDb.SelectContext(ctx, graffiti, db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT graffiti, count(*) as count FROM slots @@ -472,7 +472,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se } case "valname": names := &dbtypes.SearchAheadValidatorNameResult{} - err = db.ReaderDb.Select(names, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = db.ReaderDb.SelectContext(ctx, names, db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT name, count(*) as count FROM validator_names @@ -523,7 +523,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se } else if len(search) >= 2 && len(search) <= 96 { // Search by pubkey prefix validators := &dbtypes.SearchAheadValidatorResult{} - err = db.ReaderDb.Select(validators, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = db.ReaderDb.SelectContext(ctx, validators, db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT v.validator_index, v.pubkey FROM validators v @@ -573,7 +573,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se } else if len(search) >= 2 && len(search) < 40 { // Search by address prefix in DB addresses := &dbtypes.SearchAheadAddressResult{} - err = db.ReaderDb.Select(addresses, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = db.ReaderDb.SelectContext(ctx, addresses, db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT address, is_contract FROM el_accounts @@ -626,7 +626,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se } else if len(search) >= 2 && len(search) < 64 { // Search by transaction hash prefix in DB transactions := &dbtypes.SearchAheadTransactionResult{} - err = db.ReaderDb.Select(transactions, db.EngineQuery(map[dbtypes.DBEngineType]string{ + err = db.ReaderDb.SelectContext(ctx, transactions, 
db.EngineQuery(map[dbtypes.DBEngineType]string{ dbtypes.DBEnginePgsql: ` SELECT DISTINCT ON (tx_hash) tx_hash, block_number, reverted FROM el_transactions diff --git a/handlers/slot.go b/handlers/slot.go index 0a2750c31..755b57bff 100644 --- a/handlers/slot.go +++ b/handlers/slot.go @@ -46,6 +46,8 @@ func Slot(w http.ResponseWriter, r *http.Request) { "slot/deposit_requests.html", "slot/withdrawal_requests.html", "slot/consolidation_requests.html", + "slot/bids.html", + "slot/ptc_votes.html", ) var notfoundTemplateFiles = append(layoutTemplateFiles, "slot/notfound.html", @@ -611,9 +613,16 @@ func getSlotPageBlockData(ctx context.Context, blockData *services.CombinedBlock pageData.VoluntaryExits = make([]*models.SlotPageVoluntaryExit, pageData.VoluntaryExitsCount) for i, exit := range voluntaryExits { + validatorIndex := uint64(exit.Message.ValidatorIndex) + isBuilder := validatorIndex&services.BuilderIndexFlag != 0 + displayIndex := validatorIndex + if isBuilder { + displayIndex = validatorIndex &^ services.BuilderIndexFlag + } pageData.VoluntaryExits[i] = &models.SlotPageVoluntaryExit{ - ValidatorIndex: uint64(exit.Message.ValidatorIndex), - ValidatorName: services.GlobalBeaconService.GetValidatorName(uint64(exit.Message.ValidatorIndex)), + ValidatorIndex: displayIndex, + ValidatorName: services.GlobalBeaconService.GetValidatorName(validatorIndex), + IsBuilder: isBuilder, Epoch: uint64(exit.Message.Epoch), Signature: exit.Signature[:], } @@ -729,7 +738,40 @@ func getSlotPageBlockData(ctx context.Context, blockData *services.CombinedBlock pageData.SyncAggParticipation = utils.SyncCommitteeParticipation(pageData.SyncAggregateBits, specs.SyncCommitteeSize) } - if executionPayload, _ := blockData.Block.ExecutionPayload(); executionPayload != nil { + if payloadBid, err := blockData.Block.SignedExecutionPayloadBid(); err == nil { + commitments := make([][]byte, len(payloadBid.Message.BlobKZGCommitments)) + for i := range payloadBid.Message.BlobKZGCommitments { + 
commitments[i] = payloadBid.Message.BlobKZGCommitments[i][:] + } + + pageData.PayloadHeader = &models.SlotPagePayloadHeader{ + PayloadStatus: uint16(0), + ParentBlockHash: payloadBid.Message.ParentBlockHash[:], + ParentBlockRoot: payloadBid.Message.ParentBlockRoot[:], + BlockHash: payloadBid.Message.BlockHash[:], + GasLimit: uint64(payloadBid.Message.GasLimit), + BuilderIndex: uint64(payloadBid.Message.BuilderIndex), + BuilderName: services.GlobalBeaconService.GetValidatorName(uint64(payloadBid.Message.BuilderIndex) | services.BuilderIndexFlag), + Slot: uint64(payloadBid.Message.Slot), + Value: uint64(payloadBid.Message.Value), + BlobKZGCommitments: commitments, + Signature: payloadBid.Signature[:], + } + } + + var executionPayload *spec.VersionedExecutionPayload + if blockData.Block.Version >= spec.DataVersionGloas && blockData.Payload != nil { + executionPayload = &spec.VersionedExecutionPayload{ + Version: spec.DataVersionGloas, + Gloas: blockData.Payload.Message.Payload, + } + + pageData.PayloadHeader.PayloadStatus = uint16(1) + } else { + executionPayload, _ = blockData.Block.ExecutionPayload() + } + + if executionPayload != nil { pageData.ExecutionData = &models.SlotPageExecutionData{} if parentHash, err := executionPayload.ParentHash(); err == nil { @@ -869,10 +911,27 @@ func getSlotPageBlockData(ctx context.Context, blockData *services.CombinedBlock } } - if requests, err := blockData.Block.ExecutionRequests(); err == nil && requests != nil { - getSlotPageDepositRequests(pageData, requests.Deposits) - getSlotPageWithdrawalRequests(pageData, requests.Withdrawals) - getSlotPageConsolidationRequests(pageData, requests.Consolidations) + if specs.ElectraForkEpoch != nil && uint64(epoch) >= *specs.ElectraForkEpoch { + var requests *electra.ExecutionRequests + if blockData.Block.Version >= spec.DataVersionGloas { + if blockData.Payload != nil { + requests = blockData.Payload.Message.ExecutionRequests + } + } else { + requests, _ = 
blockData.Block.ExecutionRequests() + } + + if requests != nil { + getSlotPageDepositRequests(pageData, requests.Deposits) + getSlotPageWithdrawalRequests(pageData, requests.Withdrawals) + getSlotPageConsolidationRequests(pageData, requests.Consolidations) + } + } + + // Load execution payload bids for ePBS (gloas+) blocks + if blockData.Block.Version >= spec.DataVersionGloas { + getSlotPageBids(pageData) + getSlotPagePtcVotes(pageData, blockData, blockData.Header.Message.Slot) } return pageData @@ -1084,3 +1143,171 @@ func getSlotPageConsolidationRequests(pageData *models.SlotPageBlockData, consol pageData.ConsolidationRequestsCount = uint64(len(pageData.ConsolidationRequests)) } + +func getSlotPageBids(pageData *models.SlotPageBlockData) { + beaconIndexer := services.GlobalBeaconService.GetBeaconIndexer() + bids := beaconIndexer.GetBlockBids(phase0.Root(pageData.ParentRoot)) + + pageData.Bids = make([]*models.SlotPageBid, 0, len(bids)) + + // Get the winning block hash for comparison + var winningBlockHash []byte + if pageData.ExecutionData != nil { + winningBlockHash = pageData.ExecutionData.BlockHash + } + + for _, bid := range bids { + bidData := &models.SlotPageBid{ + ParentRoot: bid.ParentRoot, + ParentHash: bid.ParentHash, + BlockHash: bid.BlockHash, + FeeRecipient: bid.FeeRecipient, + GasLimit: bid.GasLimit, + BuilderIndex: uint64(bid.BuilderIndex), + BuilderName: services.GlobalBeaconService.GetValidatorName(uint64(bid.BuilderIndex)), + IsSelfBuilt: bid.BuilderIndex < 0, + Slot: bid.Slot, + Value: bid.Value, + ElPayment: bid.ElPayment, + TotalValue: bid.Value + bid.ElPayment, + } + + // Check if this is the winning bid + if winningBlockHash != nil && len(bid.BlockHash) == len(winningBlockHash) { + isWinning := true + for i := range bid.BlockHash { + if bid.BlockHash[i] != winningBlockHash[i] { + isWinning = false + break + } + } + bidData.IsWinning = isWinning + } + + pageData.Bids = append(pageData.Bids, bidData) + } + + // Sort by total value (value + 
el_payment) descending + for i := 0; i < len(pageData.Bids)-1; i++ { + for j := i + 1; j < len(pageData.Bids); j++ { + if pageData.Bids[j].TotalValue > pageData.Bids[i].TotalValue { + pageData.Bids[i], pageData.Bids[j] = pageData.Bids[j], pageData.Bids[i] + } + } + } + + pageData.BidsCount = uint64(len(pageData.Bids)) +} + +// getSlotPagePtcVotes extracts PTC (Payload Timeliness Committee) votes from a Gloas block. +// PTC votes are included in blocks as payload attestations for the PREVIOUS slot. +func getSlotPagePtcVotes(pageData *models.SlotPageBlockData, blockData *services.CombinedBlockResponse, blockSlot phase0.Slot) { + // Only Gloas+ blocks have payload attestations + if blockData.Block.Version < spec.DataVersionGloas || blockData.Block.Gloas == nil { + return + } + + payloadAttestations := blockData.Block.Gloas.Message.Body.PayloadAttestations + if len(payloadAttestations) == 0 { + return + } + + chainState := services.GlobalBeaconService.GetChainState() + specs := chainState.GetSpecs() + + // PTC votes are for the previous slot + votedSlot := blockSlot - 1 + votedEpoch := chainState.EpochOfSlot(votedSlot) + + // Get epoch stats for the voted slot to retrieve PTC duties + var ptcDuties []phase0.ValidatorIndex + beaconIndexer := services.GlobalBeaconService.GetBeaconIndexer() + epochStats := beaconIndexer.GetEpochStatsByEpoch(votedEpoch) + for _, es := range epochStats { + values := es.GetValues(true) + if values != nil && values.PtcDuties != nil { + slotInEpoch := uint64(votedSlot) % specs.SlotsPerEpoch + if slotInEpoch < uint64(len(values.PtcDuties)) && values.PtcDuties[slotInEpoch] != nil { + // Convert from active indice indices to validator indices + ptcDuties = make([]phase0.ValidatorIndex, len(values.PtcDuties[slotInEpoch])) + for i, activeIdx := range values.PtcDuties[slotInEpoch] { + if int(activeIdx) < len(values.ActiveIndices) { + ptcDuties[i] = values.ActiveIndices[activeIdx] + } + } + break + } + } + } + + // Build PTC votes structure + 
ptcVotes := &models.SlotPagePtcVotes{ + VotedSlot: uint64(votedSlot), + TotalPtcSize: specs.PtcSize, + Aggregates: make([]*models.SlotPagePtcAggregate, 0, len(payloadAttestations)), + } + + // Track participating validators across all aggregates + participatingValidators := make(map[uint64]bool) + totalVotes := uint64(0) + + for _, pa := range payloadAttestations { + if pa == nil || pa.Data == nil { + continue + } + + // Set voted block root from first attestation + if ptcVotes.VotedBlockRoot == nil { + ptcVotes.VotedBlockRoot = pa.Data.BeaconBlockRoot[:] + } + + aggregate := &models.SlotPagePtcAggregate{ + PayloadPresent: pa.Data.PayloadPresent, + BlobDataAvailable: pa.Data.BlobDataAvailable, + AggregationBits: pa.AggregationBits, + Signature: pa.Signature[:], + Validators: make([]uint64, 0), + } + + // Map aggregation bits to validator indices + if len(ptcDuties) > 0 { + for i := 0; i < len(ptcDuties) && i < len(pa.AggregationBits)*8; i++ { + byteIdx := i / 8 + bitIdx := i % 8 + if byteIdx < len(pa.AggregationBits) && (pa.AggregationBits[byteIdx]>>bitIdx)&1 == 1 { + validatorIdx := uint64(ptcDuties[i]) + aggregate.Validators = append(aggregate.Validators, validatorIdx) + participatingValidators[validatorIdx] = true + } + } + } + + aggregate.VoteCount = uint64(len(aggregate.Validators)) + totalVotes += aggregate.VoteCount + + ptcVotes.Aggregates = append(ptcVotes.Aggregates, aggregate) + } + + // Build PTC committee list + ptcVotes.PtcCommittee = make([]types.NamedValidator, len(ptcDuties)) + for i, vidx := range ptcDuties { + ptcVotes.PtcCommittee[i] = types.NamedValidator{ + Index: uint64(vidx), + Name: services.GlobalBeaconService.GetValidatorName(uint64(vidx)), + } + + // Add to validator names map + if pageData.ValidatorNames == nil { + pageData.ValidatorNames = make(map[uint64]string) + } + pageData.ValidatorNames[uint64(vidx)] = ptcVotes.PtcCommittee[i].Name + } + + // Calculate participation rate + if specs.PtcSize > 0 { + ptcVotes.Participation = 
float64(len(participatingValidators)) / float64(specs.PtcSize) + } + + pageData.PtcVotes = ptcVotes + pageData.PtcVotesCount = totalVotes +} diff --git a/handlers/slots.go b/handlers/slots.go index 8b3b42050..de22a1905 100644 --- a/handlers/slots.go +++ b/handlers/slots.go @@ -254,12 +254,19 @@ func buildSlotsPageData(ctx context.Context, firstSlot uint64, pageSize uint64, dbSlot := dbSlots[dbIdx] dbIdx++ + epoch := chainState.EpochOfSlot(phase0.Slot(slot)) + payloadStatus := dbSlot.PayloadStatus + if !chainState.IsEip7732Enabled(phase0.Epoch(epoch)) { + payloadStatus = dbtypes.PayloadStatusCanonical + } + slotData := &models.SlotsPageDataSlot{ Slot: slot, - Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot))), + Epoch: uint64(epoch), Ts: chainState.SlotToTime(phase0.Slot(slot)), Finalized: finalized, Status: uint8(dbSlot.Status), + PayloadStatus: uint8(payloadStatus), Scheduled: slot >= uint64(currentSlot) && dbSlot.Status == dbtypes.Missing, Synchronized: dbSlot.SyncParticipation != -1, Proposer: dbSlot.Proposer, diff --git a/handlers/slots_filtered.go b/handlers/slots_filtered.go index 52fb38d71..195b0a88f 100644 --- a/handlers/slots_filtered.go +++ b/handlers/slots_filtered.go @@ -465,12 +465,13 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui break } slot := phase0.Slot(dbBlock.Slot) + epoch := chainState.EpochOfSlot(slot) slotData := &models.SlotsFilteredPageDataSlot{ Slot: uint64(slot), - Epoch: uint64(chainState.EpochOfSlot(slot)), + Epoch: uint64(epoch), Ts: chainState.SlotToTime(slot), - Finalized: finalizedEpoch >= chainState.EpochOfSlot(slot), + Finalized: finalizedEpoch >= epoch, Synchronized: true, Scheduled: slot >= currentSlot, Proposer: dbBlock.Proposer, @@ -502,6 +503,12 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui slotData.EthBlockNumber = *dbBlock.Block.EthBlockNumber } + payloadStatus := dbBlock.Block.PayloadStatus + if !chainState.IsEip7732Enabled(epoch) { + 
payloadStatus = dbtypes.PayloadStatusCanonical + } + slotData.PayloadStatus = uint8(payloadStatus) + if pageData.DisplayMevBlock && dbBlock.Block.EthBlockHash != nil { if mevBlock, exists := mevBlocksMap[fmt.Sprintf("%x", dbBlock.Block.EthBlockHash)]; exists { slotData.IsMevBlock = true diff --git a/handlers/validator_slots.go b/handlers/validator_slots.go index 3cf97d9a4..d4cb6417d 100644 --- a/handlers/validator_slots.go +++ b/handlers/validator_slots.go @@ -113,12 +113,13 @@ func buildValidatorSlotsPageData(ctx context.Context, validator uint64, pageIdx break } slot := blockAssignment.Slot + epoch := chainState.EpochOfSlot(phase0.Slot(slot)) slotData := &models.ValidatorSlotsPageDataSlot{ Slot: slot, - Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot))), + Epoch: uint64(epoch), Ts: chainState.SlotToTime(phase0.Slot(slot)), - Finalized: finalizedEpoch >= chainState.EpochOfSlot(phase0.Slot(slot)), + Finalized: finalizedEpoch >= epoch, Status: uint8(0), Proposer: validator, ProposerName: pageData.Name, @@ -141,6 +142,12 @@ func buildValidatorSlotsPageData(ctx context.Context, validator uint64, pageIdx slotData.WithEthBlock = true slotData.EthBlockNumber = *dbBlock.EthBlockNumber } + + payloadStatus := dbBlock.PayloadStatus + if !chainState.IsEip7732Enabled(epoch) { + payloadStatus = dbtypes.PayloadStatusCanonical + } + slotData.PayloadStatus = uint8(payloadStatus) } pageData.Slots = append(pageData.Slots, slotData) } diff --git a/handlers/voluntary_exits.go b/handlers/voluntary_exits.go index 990b42057..834aef56d 100644 --- a/handlers/voluntary_exits.go +++ b/handlers/voluntary_exits.go @@ -9,6 +9,7 @@ import ( "strings" v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/services" @@ -164,40 +165,67 @@ func buildFilteredVoluntaryExitsPageData(ctx context.Context, pageIdx uint64, pa SlotRoot: 
voluntaryExit.SlotRoot, Time: chainState.SlotToTime(phase0.Slot(voluntaryExit.SlotNumber)), Orphaned: voluntaryExit.Orphaned, - ValidatorIndex: voluntaryExit.ValidatorIndex, - ValidatorName: services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex), ValidatorStatus: "", } - validator := services.GlobalBeaconService.GetValidatorByIndex(phase0.ValidatorIndex(voluntaryExit.ValidatorIndex), false) - if validator == nil { - voluntaryExitData.ValidatorStatus = "Unknown" - } else { - voluntaryExitData.PublicKey = validator.Validator.PublicKey[:] - voluntaryExitData.WithdrawalCreds = validator.Validator.WithdrawalCredentials - - if strings.HasPrefix(validator.Status.String(), "pending") { - voluntaryExitData.ValidatorStatus = "Pending" - } else if validator.Status == v1.ValidatorStateActiveOngoing { - voluntaryExitData.ValidatorStatus = "Active" - voluntaryExitData.ShowUpcheck = true - } else if validator.Status == v1.ValidatorStateActiveExiting { - voluntaryExitData.ValidatorStatus = "Exiting" - voluntaryExitData.ShowUpcheck = true - } else if validator.Status == v1.ValidatorStateActiveSlashed { - voluntaryExitData.ValidatorStatus = "Slashed" - voluntaryExitData.ShowUpcheck = true - } else if validator.Status == v1.ValidatorStateExitedUnslashed { - voluntaryExitData.ValidatorStatus = "Exited" - } else if validator.Status == v1.ValidatorStateExitedSlashed { - voluntaryExitData.ValidatorStatus = "Slashed" + // Check if this is a builder exit (validator index has BuilderIndexFlag set) + if voluntaryExit.ValidatorIndex&services.BuilderIndexFlag != 0 { + builderIndex := voluntaryExit.ValidatorIndex &^ services.BuilderIndexFlag + voluntaryExitData.IsBuilder = true + voluntaryExitData.ValidatorIndex = builderIndex + + // Resolve builder name via validatornames service (with BuilderIndexFlag) + voluntaryExitData.ValidatorName = services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex) + + builder := 
services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex)) + if builder == nil { + voluntaryExitData.ValidatorStatus = "Unknown" } else { - voluntaryExitData.ValidatorStatus = validator.Status.String() + voluntaryExitData.PublicKey = builder.PublicKey[:] + + // Determine builder status + currentEpoch := chainState.CurrentEpoch() + if builder.WithdrawableEpoch <= currentEpoch { + voluntaryExitData.ValidatorStatus = "Exited" + } else { + voluntaryExitData.ValidatorStatus = "Exiting" + } } + } else { + // Regular validator exit + voluntaryExitData.ValidatorIndex = voluntaryExit.ValidatorIndex + voluntaryExitData.ValidatorName = services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex) + + validator := services.GlobalBeaconService.GetValidatorByIndex(phase0.ValidatorIndex(voluntaryExit.ValidatorIndex), false) + if validator == nil { + voluntaryExitData.ValidatorStatus = "Unknown" + } else { + voluntaryExitData.PublicKey = validator.Validator.PublicKey[:] + voluntaryExitData.WithdrawalCreds = validator.Validator.WithdrawalCredentials + + if strings.HasPrefix(validator.Status.String(), "pending") { + voluntaryExitData.ValidatorStatus = "Pending" + } else if validator.Status == v1.ValidatorStateActiveOngoing { + voluntaryExitData.ValidatorStatus = "Active" + voluntaryExitData.ShowUpcheck = true + } else if validator.Status == v1.ValidatorStateActiveExiting { + voluntaryExitData.ValidatorStatus = "Exiting" + voluntaryExitData.ShowUpcheck = true + } else if validator.Status == v1.ValidatorStateActiveSlashed { + voluntaryExitData.ValidatorStatus = "Slashed" + voluntaryExitData.ShowUpcheck = true + } else if validator.Status == v1.ValidatorStateExitedUnslashed { + voluntaryExitData.ValidatorStatus = "Exited" + } else if validator.Status == v1.ValidatorStateExitedSlashed { + voluntaryExitData.ValidatorStatus = "Slashed" + } else { + voluntaryExitData.ValidatorStatus = validator.Status.String() + } - if voluntaryExitData.ShowUpcheck { - 
voluntaryExitData.UpcheckActivity = uint8(services.GlobalBeaconService.GetValidatorLiveness(validator.Index, 3)) - voluntaryExitData.UpcheckMaximum = uint8(3) + if voluntaryExitData.ShowUpcheck { + voluntaryExitData.UpcheckActivity = uint8(services.GlobalBeaconService.GetValidatorLiveness(validator.Index, 3)) + voluntaryExitData.UpcheckMaximum = uint8(3) + } } } diff --git a/indexer/beacon/bidcache.go b/indexer/beacon/bidcache.go new file mode 100644 index 000000000..3cdbe4bc5 --- /dev/null +++ b/indexer/beacon/bidcache.go @@ -0,0 +1,211 @@ +package beacon + +import ( + "sync" + + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/ethpandaops/dora/db" + "github.com/ethpandaops/dora/dbtypes" + "github.com/jmoiron/sqlx" +) + +const ( + // bidCacheMaxSlots is the maximum number of slots to keep in the cache + bidCacheMaxSlots = 15 + // bidCacheFlushThreshold is the slot span that triggers a flush + bidCacheFlushThreshold = 15 + // bidCacheRetainSlots is the number of slots to retain after a flush + bidCacheRetainSlots = 10 +) + +// bidCacheKey uniquely identifies a bid in the cache +type bidCacheKey struct { + ParentRoot phase0.Root + ParentHash phase0.Hash32 + BlockHash phase0.Hash32 + BuilderIndex int64 +} + +// blockBidCache caches execution payload bids for recent blocks. +// Bids for older slots are ignored. The cache is flushed to DB on shutdown +// or when the slot span exceeds the threshold. +type blockBidCache struct { + indexer *Indexer + cacheMutex sync.RWMutex + bids map[bidCacheKey]*dbtypes.BlockBid + minSlot phase0.Slot + maxSlot phase0.Slot +} + +// newBlockBidCache creates a new instance of blockBidCache. +func newBlockBidCache(indexer *Indexer) *blockBidCache { + return &blockBidCache{ + indexer: indexer, + bids: make(map[bidCacheKey]*dbtypes.BlockBid, 64), + } +} + +// loadFromDB loads bids from the last N slots from the database. 
+func (cache *blockBidCache) loadFromDB(currentSlot phase0.Slot) { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + + minSlot := phase0.Slot(0) + if currentSlot > bidCacheRetainSlots { + minSlot = currentSlot - bidCacheRetainSlots + } + + dbBids := db.GetBidsForSlotRange(cache.indexer.ctx, uint64(minSlot)) + for _, bid := range dbBids { + key := bidCacheKey{ + ParentRoot: phase0.Root(bid.ParentRoot), + ParentHash: phase0.Hash32(bid.ParentHash), + BlockHash: phase0.Hash32(bid.BlockHash), + BuilderIndex: bid.BuilderIndex, + } + cache.bids[key] = bid + + slot := phase0.Slot(bid.Slot) + if cache.minSlot == 0 || slot < cache.minSlot { + cache.minSlot = slot + } + if slot > cache.maxSlot { + cache.maxSlot = slot + } + } + + if len(dbBids) > 0 { + cache.indexer.logger.Infof("loaded %d bids from DB (slots %d-%d)", len(dbBids), cache.minSlot, cache.maxSlot) + } +} + +// AddBid adds a bid to the cache. Returns true if the bid was added, +// false if it was ignored (too old) or already exists. +func (cache *blockBidCache) AddBid(bid *dbtypes.BlockBid) bool { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + + slot := phase0.Slot(bid.Slot) + + // Ignore bids for slots that are too old + if cache.maxSlot > 0 && slot+bidCacheMaxSlots < cache.maxSlot { + return false + } + + key := bidCacheKey{ + ParentRoot: phase0.Root(bid.ParentRoot), + ParentHash: phase0.Hash32(bid.ParentHash), + BlockHash: phase0.Hash32(bid.BlockHash), + BuilderIndex: bid.BuilderIndex, + } + + // Check if bid already exists + if _, exists := cache.bids[key]; exists { + return false + } + + cache.bids[key] = bid + + // Update slot bounds + if cache.minSlot == 0 || slot < cache.minSlot { + cache.minSlot = slot + } + if slot > cache.maxSlot { + cache.maxSlot = slot + } + + return true +} + +// GetBidsForBlockRoot returns all bids for a given parent block root. 
+func (cache *blockBidCache) GetBidsForBlockRoot(blockRoot phase0.Root) []*dbtypes.BlockBid { + cache.cacheMutex.RLock() + defer cache.cacheMutex.RUnlock() + + result := make([]*dbtypes.BlockBid, 0) + for key, bid := range cache.bids { + if key.ParentRoot == blockRoot { + result = append(result, bid) + } + } + + return result +} + +// checkAndFlush checks if the cache needs to be flushed and performs the flush if necessary. +// This should be called periodically (e.g., on each new block). +func (cache *blockBidCache) checkAndFlush() error { + cache.cacheMutex.Lock() + + // Check if we need to flush + if cache.maxSlot == 0 || cache.maxSlot-cache.minSlot < bidCacheFlushThreshold { + cache.cacheMutex.Unlock() + return nil + } + + // Calculate the cutoff slot - we'll flush bids older than this + cutoffSlot := cache.maxSlot - bidCacheRetainSlots + + // Collect bids to flush (from minSlot to cutoffSlot) + bidsToFlush := make([]*dbtypes.BlockBid, 0) + for key, bid := range cache.bids { + if phase0.Slot(bid.Slot) < cutoffSlot { + bidsToFlush = append(bidsToFlush, bid) + delete(cache.bids, key) + } + } + + // Update minSlot + cache.minSlot = cutoffSlot + + cache.cacheMutex.Unlock() + + // Write to DB outside of lock + if len(bidsToFlush) > 0 { + err := db.RunDBTransaction(func(tx *sqlx.Tx) error { + return db.InsertBids(bidsToFlush, tx) + }) + if err != nil { + cache.indexer.logger.Errorf("error flushing bids to db: %v", err) + return err + } + cache.indexer.logger.Debugf("flushed %d bids to DB (slots < %d)", len(bidsToFlush), cutoffSlot) + } + + return nil +} + +// flushAll flushes all cached bids to the database. +// This should be called on shutdown. 
+func (cache *blockBidCache) flushAll() error { + cache.cacheMutex.Lock() + + if len(cache.bids) == 0 { + cache.cacheMutex.Unlock() + return nil + } + + bidsToFlush := make([]*dbtypes.BlockBid, 0, len(cache.bids)) + for _, bid := range cache.bids { + bidsToFlush = append(bidsToFlush, bid) + } + + // Clear the cache + cache.bids = make(map[bidCacheKey]*dbtypes.BlockBid, 64) + cache.minSlot = 0 + cache.maxSlot = 0 + + cache.cacheMutex.Unlock() + + // Write to DB outside of lock + err := db.RunDBTransaction(func(tx *sqlx.Tx) error { + return db.InsertBids(bidsToFlush, tx) + }) + if err != nil { + cache.indexer.logger.Errorf("error flushing all bids to db: %v", err) + return err + } + + cache.indexer.logger.Infof("flushed %d bids to DB on shutdown", len(bidsToFlush)) + return nil +} diff --git a/indexer/beacon/block.go b/indexer/beacon/block.go index c3914047e..55c185737 100644 --- a/indexer/beacon/block.go +++ b/indexer/beacon/block.go @@ -3,11 +3,13 @@ package beacon import ( "context" "fmt" + "math" "math/rand/v2" "sync" "time" "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/blockdb" btypes "github.com/ethpandaops/dora/blockdb/types" @@ -20,36 +22,41 @@ import ( // Block represents a beacon block. 
type Block struct { - Root phase0.Root - Slot phase0.Slot - BlockUID uint64 - dynSsz *dynssz.DynSsz - parentRoot *phase0.Root - dependentRoot *phase0.Root - forkId ForkKey - forkChecked bool - headerMutex sync.Mutex - headerChan chan bool - header *phase0.SignedBeaconBlockHeader - blockMutex sync.Mutex - blockChan chan bool - block *spec.VersionedSignedBeaconBlock - blockIndex *BlockBodyIndex - recvDelay int32 - executionTimes []ExecutionTime // execution times from snooper clients - minExecutionTime uint16 - maxExecutionTime uint16 - execTimeUpdate *time.Ticker - executionTimesMux sync.RWMutex - isInFinalizedDb bool // block is in finalized table (slots) - isInUnfinalizedDb bool // block is in unfinalized table (unfinalized_blocks) - isDisposed bool // block is disposed - processingStatus dbtypes.UnfinalizedBlockStatus - seenMutex sync.RWMutex - seenMap map[uint16]*Client - processedActivity uint8 - blockResults [][]uint8 - blockResultsMutex sync.Mutex + Root phase0.Root + Slot phase0.Slot + BlockUID uint64 + dynSsz *dynssz.DynSsz + parentRoot *phase0.Root + dependentRoot *phase0.Root + forkId ForkKey + forkChecked bool + headerMutex sync.Mutex + headerChan chan bool + header *phase0.SignedBeaconBlockHeader + blockMutex sync.Mutex + blockChan chan bool + block *spec.VersionedSignedBeaconBlock + executionPayloadMutex sync.Mutex + executionPayloadChan chan bool + executionPayload *gloas.SignedExecutionPayloadEnvelope + blockIndex *BlockBodyIndex + recvDelay int32 + executionTimes []ExecutionTime // execution times from snooper clients + minExecutionTime uint16 + maxExecutionTime uint16 + execTimeUpdate *time.Ticker + executionTimesMux sync.RWMutex + isInFinalizedDb bool // block is in finalized table (slots) + isInUnfinalizedDb bool // block is in unfinalized table (unfinalized_blocks) + hasExecutionPayload bool // block has an execution payload (either in cache or db) + isPayloadOrphaned bool // payload is orphaned (next block doesn't build on it) + isDisposed bool 
// block is disposed + processingStatus dbtypes.UnfinalizedBlockStatus + seenMutex sync.RWMutex + seenMap map[uint16]*Client + processedActivity uint8 + blockResults [][]uint8 + blockResultsMutex sync.Mutex } // BlockBodyIndex holds important block properties that are used as index for cache lookups. @@ -58,10 +65,12 @@ type BlockBodyIndex struct { Graffiti [32]byte ExecutionExtraData []byte ExecutionHash phase0.Hash32 + ExecutionParentHash phase0.Hash32 ExecutionNumber uint64 SyncParticipation float32 EthTransactionCount uint64 BlobCount uint64 + BuilderIndex uint64 GasUsed uint64 GasLimit uint64 BlockSize uint64 @@ -69,21 +78,16 @@ type BlockBodyIndex struct { // newBlock creates a new Block instance. func newBlock(dynSsz *dynssz.DynSsz, root phase0.Root, slot phase0.Slot, blockUID uint64) *Block { - if blockUID == 0 { - // use highest possible block UID as default - blockUID = (uint64(slot) << 16) | 0xffff + return &Block{ + Root: root, + Slot: slot, + BlockUID: blockUID, + dynSsz: dynSsz, + seenMap: make(map[uint16]*Client), + headerChan: make(chan bool), + blockChan: make(chan bool), + executionPayloadChan: make(chan bool), } - block := &Block{ - Root: root, - Slot: slot, - BlockUID: blockUID, - dynSsz: dynSsz, - seenMap: make(map[uint16]*Client), - headerChan: make(chan bool), - blockChan: make(chan bool), - } - - return block } func (block *Block) Dispose() { @@ -170,7 +174,7 @@ func (block *Block) GetBlock(ctx context.Context) *spec.VersionedSignedBeaconBlo } if block.isInUnfinalizedDb { - dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:]) + dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:], false, true, false) if dbBlock != nil { blockBody, err := UnmarshalVersionedSignedBeaconBlockSSZ(block.dynSsz, dbBlock.BlockVer, dbBlock.BlockSSZ) if err == nil { @@ -188,6 +192,10 @@ func (block *Block) AwaitBlock(ctx context.Context, timeout time.Duration) *spec return nil } + if block.block != nil { + return block.block + } + if ctx == nil { ctx = 
context.Background() } @@ -201,6 +209,40 @@ func (block *Block) AwaitBlock(ctx context.Context, timeout time.Duration) *spec return block.block } +// GetExecutionPayload returns the execution payload of this block. +func (block *Block) GetExecutionPayload(ctx context.Context) *gloas.SignedExecutionPayloadEnvelope { + if block.executionPayload != nil { + return block.executionPayload + } + + if block.hasExecutionPayload && block.isInUnfinalizedDb { + dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:], false, false, true) + if dbBlock != nil { + payload, err := UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, dbBlock.PayloadVer, dbBlock.PayloadSSZ) + if err == nil { + return payload + } + } + } + + return nil +} + +// AwaitExecutionPayload waits for the execution payload of this block to be available. +func (block *Block) AwaitExecutionPayload(ctx context.Context, timeout time.Duration) *gloas.SignedExecutionPayloadEnvelope { + if ctx == nil { + ctx = context.Background() + } + + select { + case <-block.executionPayloadChan: + case <-time.After(timeout): + case <-ctx.Done(): + } + + return block.executionPayload +} + // GetParentRoot returns the parent root of this block. func (block *Block) GetParentRoot() *phase0.Root { if block.isDisposed { @@ -264,7 +306,7 @@ func (block *Block) SetBlock(body *spec.VersionedSignedBeaconBlock) { return } - block.setBlockIndex(body) + block.setBlockIndex(body, nil) block.block = body if block.blockChan != nil { @@ -295,7 +337,7 @@ func (block *Block) EnsureBlock(loadBlock func() (*spec.VersionedSignedBeaconBlo return false, err } - block.setBlockIndex(blockBody) + block.setBlockIndex(blockBody, nil) block.block = blockBody if block.blockChan != nil { close(block.blockChan) @@ -305,35 +347,106 @@ func (block *Block) EnsureBlock(loadBlock func() (*spec.VersionedSignedBeaconBlo return true, nil } +// SetExecutionPayload sets the execution payload of this block. 
+func (block *Block) SetExecutionPayload(payload *gloas.SignedExecutionPayloadEnvelope) { + block.setBlockIndex(block.block, payload) + block.executionPayload = payload + block.hasExecutionPayload = true + + if block.executionPayloadChan != nil { + close(block.executionPayloadChan) + block.executionPayloadChan = nil + } +} + +// EnsureExecutionPayload ensures that the execution payload of this block is available. +func (block *Block) EnsureExecutionPayload(loadExecutionPayload func() (*gloas.SignedExecutionPayloadEnvelope, error)) (bool, error) { + if block.executionPayload != nil { + return false, nil + } + + if block.hasExecutionPayload { + return false, nil + } + + block.executionPayloadMutex.Lock() + defer block.executionPayloadMutex.Unlock() + + if block.executionPayload != nil { + return false, nil + } + + payload, err := loadExecutionPayload() + if err != nil { + return false, err + } + + if payload == nil { + return false, nil + } + + block.setBlockIndex(block.block, payload) + block.executionPayload = payload + block.hasExecutionPayload = true + if block.executionPayloadChan != nil { + close(block.executionPayloadChan) + block.executionPayloadChan = nil + } + + return true, nil +} + // setBlockIndex sets the block index of this block. 
-func (block *Block) setBlockIndex(body *spec.VersionedSignedBeaconBlock) { +func (block *Block) setBlockIndex(body *spec.VersionedSignedBeaconBlock, payload *gloas.SignedExecutionPayloadEnvelope) { if body == nil { return } - blockIndex := &BlockBodyIndex{} - blockIndex.Graffiti, _ = body.Graffiti() + blockIndex := block.blockIndex + if blockIndex == nil { + blockIndex = &BlockBodyIndex{} + } + + if body != nil { + blockIndex.Graffiti, _ = body.Graffiti() + blockIndex.ExecutionExtraData, _ = getBlockExecutionExtraData(body) + blockIndex.ExecutionHash, _ = body.ExecutionBlockHash() + if execNumber, err := body.ExecutionBlockNumber(); err == nil { + blockIndex.ExecutionNumber = uint64(execNumber) + } + if transactions, err := body.ExecutionTransactions(); err == nil { + blockIndex.EthTransactionCount = uint64(len(transactions)) + } + if blobKzgCommitments, err := body.BlobKZGCommitments(); err == nil { + blockIndex.BlobCount = uint64(len(blobKzgCommitments)) + } + if builderIndex, err := getBlockPayloadBuilderIndex(body); err == nil { + blockIndex.BuilderIndex = uint64(builderIndex) + } else { + blockIndex.BuilderIndex = math.MaxUint64 + } + if parentHash, err := getBlockExecutionParentHash(body); err == nil { + blockIndex.ExecutionParentHash = parentHash + } + if executionPayload, err := body.ExecutionPayload(); err == nil { + gasUsed, _ := executionPayload.GasUsed() + blockIndex.GasUsed = gasUsed - executionPayload, _ := body.ExecutionPayload() - if executionPayload != nil { - blockIndex.ExecutionExtraData, _ = executionPayload.ExtraData() - blockIndex.ExecutionHash, _ = executionPayload.BlockHash() - blockIndex.ExecutionNumber, _ = executionPayload.BlockNumber() + gasLimit, _ := executionPayload.GasLimit() + blockIndex.GasLimit = gasLimit + } + } + if payload != nil { + blockIndex.ExecutionNumber = uint64(payload.Message.Payload.BlockNumber) + blockIndex.ExecutionParentHash = payload.Message.Payload.ParentHash // Calculate transaction count - 
executionTransactions, _ := executionPayload.Transactions() + executionTransactions := payload.Message.Payload.Transactions blockIndex.EthTransactionCount = uint64(len(executionTransactions)) - // Calculate blob count - blobKzgCommitments, _ := body.BlobKZGCommitments() - blockIndex.BlobCount = uint64(len(blobKzgCommitments)) - // Get gas used and gas limit - gasUsed, _ := executionPayload.GasUsed() - blockIndex.GasUsed = gasUsed - - gasLimit, _ := executionPayload.GasLimit() - blockIndex.GasLimit = gasLimit + blockIndex.GasUsed = payload.Message.Payload.GasUsed + blockIndex.GasLimit = payload.Message.Payload.GasLimit } // Calculate block size @@ -372,7 +485,7 @@ func (block *Block) GetBlockIndex(ctx context.Context) *BlockBodyIndex { blockBody := block.GetBlock(ctx) if blockBody != nil { - block.setBlockIndex(blockBody) + block.setBlockIndex(blockBody, block.GetExecutionPayload(ctx)) } return block.blockIndex @@ -432,14 +545,25 @@ func (block *Block) buildOrphanedBlock(ctx context.Context, compress bool) (*dbt return nil, fmt.Errorf("marshal block ssz failed: %v", err) } - return &dbtypes.OrphanedBlock{ + orphanedBlock := &dbtypes.OrphanedBlock{ Root: block.Root[:], HeaderVer: 1, HeaderSSZ: headerSSZ, BlockVer: blockVer, BlockSSZ: blockSSZ, BlockUid: block.BlockUID, - }, nil + } + + if block.executionPayload != nil { + payloadVer, payloadSSZ, err := MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, block.executionPayload, compress) + if err != nil { + return nil, fmt.Errorf("marshal execution payload ssz failed: %v", err) + } + orphanedBlock.PayloadVer = payloadVer + orphanedBlock.PayloadSSZ = payloadSSZ + } + + return orphanedBlock, nil } func (block *Block) writeToBlockDb(ctx context.Context) error { @@ -447,7 +571,7 @@ func (block *Block) writeToBlockDb(ctx context.Context) error { return nil } - _, err := blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, uint64(block.Slot), block.Root[:], func() (*btypes.BlockData, error) { + _, _, err := 
blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, uint64(block.Slot), block.Root[:], func() (*btypes.BlockData, error) { headerSSZ, err := block.header.MarshalSSZ() if err != nil { return nil, fmt.Errorf("marshal header ssz failed: %v", err) @@ -478,9 +602,12 @@ func (block *Block) unpruneBlockBody(ctx context.Context) { return } - dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:]) + dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:], false, true, true) if dbBlock != nil { block.block, _ = UnmarshalVersionedSignedBeaconBlockSSZ(block.dynSsz, dbBlock.BlockVer, dbBlock.BlockSSZ) + if len(dbBlock.PayloadSSZ) > 0 { + block.executionPayload, _ = UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, dbBlock.PayloadVer, dbBlock.PayloadSSZ) + } } } diff --git a/indexer/beacon/block_helper.go b/indexer/beacon/block_helper.go index c943ede1c..979579a0c 100644 --- a/indexer/beacon/block_helper.go +++ b/indexer/beacon/block_helper.go @@ -10,6 +10,7 @@ import ( "github.com/attestantio/go-eth2-client/spec/capella" "github.com/attestantio/go-eth2-client/spec/deneb" "github.com/attestantio/go-eth2-client/spec/electra" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/utils" dynssz "github.com/pk910/dynamic-ssz" @@ -47,6 +48,9 @@ func MarshalVersionedSignedBeaconBlockSSZ(dynSsz *dynssz.DynSsz, block *spec.Ver case spec.DataVersionFulu: version = uint64(block.Version) ssz, err = dynSsz.MarshalSSZ(block.Fulu) + case spec.DataVersionGloas: + version = uint64(block.Version) + ssz, err = dynSsz.MarshalSSZ(block.Gloas) default: err = fmt.Errorf("unknown block version") } @@ -118,6 +122,11 @@ func UnmarshalVersionedSignedBeaconBlockSSZ(dynSsz *dynssz.DynSsz, version uint6 if err := dynSsz.UnmarshalSSZ(block.Fulu, ssz); err != nil { return nil, fmt.Errorf("failed to decode fulu signed beacon block: %v", err) } + case spec.DataVersionGloas: + block.Gloas = &gloas.SignedBeaconBlock{} + if err 
:= dynSsz.UnmarshalSSZ(block.Gloas, ssz); err != nil { + return nil, fmt.Errorf("failed to decode gloas signed beacon block: %v", err) + } default: return nil, fmt.Errorf("unknown block version") } @@ -148,6 +157,9 @@ func MarshalVersionedSignedBeaconBlockJson(block *spec.VersionedSignedBeaconBloc case spec.DataVersionFulu: version = uint64(block.Version) jsonRes, err = block.Fulu.MarshalJSON() + case spec.DataVersionGloas: + version = uint64(block.Version) + jsonRes, err = block.Gloas.MarshalJSON() default: err = fmt.Errorf("unknown block version") } @@ -201,12 +213,195 @@ func unmarshalVersionedSignedBeaconBlockJson(version uint64, ssz []byte) (*spec. if err := block.Fulu.UnmarshalJSON(ssz); err != nil { return nil, fmt.Errorf("failed to decode fulu signed beacon block: %v", err) } + case spec.DataVersionGloas: + block.Gloas = &gloas.SignedBeaconBlock{} + if err := block.Gloas.UnmarshalJSON(ssz); err != nil { + return nil, fmt.Errorf("failed to decode gloas signed beacon block: %v", err) + } default: return nil, fmt.Errorf("unknown block version") } return block, nil } +// MarshalVersionedSignedExecutionPayloadEnvelopeSSZ marshals a signed execution payload envelope using SSZ encoding. +func MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(dynSsz *dynssz.DynSsz, payload *gloas.SignedExecutionPayloadEnvelope, compress bool) (version uint64, ssz []byte, err error) { + if utils.Config.KillSwitch.DisableSSZEncoding { + // SSZ encoding disabled, use json instead + version, ssz, err = marshalVersionedSignedExecutionPayloadEnvelopeJson(payload) + } else { + // SSZ encoding + version = uint64(spec.DataVersionGloas) + ssz, err = dynSsz.MarshalSSZ(payload) + } + + if compress { + ssz = compressBytes(ssz) + version |= compressionFlag + } + + return +} + +// UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ unmarshals a versioned signed execution payload envelope using SSZ encoding. 
+func UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(dynSsz *dynssz.DynSsz, version uint64, ssz []byte) (*gloas.SignedExecutionPayloadEnvelope, error) { + if (version & compressionFlag) != 0 { + // decompress + if d, err := decompressBytes(ssz); err != nil { + return nil, fmt.Errorf("failed to decompress: %v", err) + } else { + ssz = d + version &= ^compressionFlag + } + } + + if (version & jsonVersionFlag) != 0 { + // JSON encoding + return unmarshalVersionedSignedExecutionPayloadEnvelopeJson(version, ssz) + } + + if version != uint64(spec.DataVersionGloas) { + return nil, fmt.Errorf("unknown version") + } + + // SSZ encoding + payload := &gloas.SignedExecutionPayloadEnvelope{} + if err := dynSsz.UnmarshalSSZ(payload, ssz); err != nil { + return nil, fmt.Errorf("failed to decode gloas signed execution payload envelope: %v", err) + } + + return payload, nil +} + +// marshalVersionedSignedExecutionPayloadEnvelopeJson marshals a versioned signed execution payload envelope using JSON encoding. +func marshalVersionedSignedExecutionPayloadEnvelopeJson(payload *gloas.SignedExecutionPayloadEnvelope) (version uint64, jsonRes []byte, err error) { + version = uint64(spec.DataVersionGloas) + jsonRes, err = payload.MarshalJSON() + + version |= jsonVersionFlag + + return +} + +// unmarshalVersionedSignedExecutionPayloadEnvelopeJson unmarshals a versioned signed execution payload envelope using JSON encoding. 
+func unmarshalVersionedSignedExecutionPayloadEnvelopeJson(version uint64, ssz []byte) (*gloas.SignedExecutionPayloadEnvelope, error) { + if version&jsonVersionFlag == 0 { + return nil, fmt.Errorf("no json encoding") + } + + if version-jsonVersionFlag != uint64(spec.DataVersionGloas) { + return nil, fmt.Errorf("unknown version") + } + + payload := &gloas.SignedExecutionPayloadEnvelope{} + if err := payload.UnmarshalJSON(ssz); err != nil { + return nil, fmt.Errorf("failed to decode gloas signed execution payload envelope: %v", err) + } + return payload, nil +} + +// getBlockExecutionExtraData returns the extra data from the execution payload of a versioned signed beacon block. +func getBlockExecutionExtraData(v *spec.VersionedSignedBeaconBlock) ([]byte, error) { + switch v.Version { + case spec.DataVersionBellatrix: + if v.Bellatrix == nil || v.Bellatrix.Message == nil || v.Bellatrix.Message.Body == nil || v.Bellatrix.Message.Body.ExecutionPayload == nil { + return nil, errors.New("no bellatrix block") + } + + return v.Bellatrix.Message.Body.ExecutionPayload.ExtraData, nil + case spec.DataVersionCapella: + if v.Capella == nil || v.Capella.Message == nil || v.Capella.Message.Body == nil || v.Capella.Message.Body.ExecutionPayload == nil { + return nil, errors.New("no capella block") + } + + return v.Capella.Message.Body.ExecutionPayload.ExtraData, nil + case spec.DataVersionDeneb: + if v.Deneb == nil || v.Deneb.Message == nil || v.Deneb.Message.Body == nil || v.Deneb.Message.Body.ExecutionPayload == nil { + return nil, errors.New("no deneb block") + } + + return v.Deneb.Message.Body.ExecutionPayload.ExtraData, nil + case spec.DataVersionElectra: + if v.Electra == nil || v.Electra.Message == nil || v.Electra.Message.Body == nil || v.Electra.Message.Body.ExecutionPayload == nil { + return nil, errors.New("no electra block") + } + + return v.Electra.Message.Body.ExecutionPayload.ExtraData, nil + case spec.DataVersionGloas: + return nil, nil + default: + return nil, 
errors.New("unknown version") + } +} + +// getBlockPayloadBuilderIndex returns the builder index from the execution payload of a versioned signed beacon block. +func getBlockPayloadBuilderIndex(v *spec.VersionedSignedBeaconBlock) (gloas.BuilderIndex, error) { + switch v.Version { + case spec.DataVersionPhase0: + return 0, errors.New("no builder index in phase0 block") + case spec.DataVersionAltair: + return 0, errors.New("no builder index in altair block") + case spec.DataVersionBellatrix: + return 0, errors.New("no builder index in bellatrix block") + case spec.DataVersionCapella: + return 0, errors.New("no builder index in capella block") + case spec.DataVersionDeneb: + return 0, errors.New("no builder index in deneb block") + case spec.DataVersionElectra: + return 0, errors.New("no builder index in electra block") + case spec.DataVersionGloas: + if v.Gloas == nil || v.Gloas.Message == nil || v.Gloas.Message.Body == nil || v.Gloas.Message.Body.SignedExecutionPayloadBid == nil || v.Gloas.Message.Body.SignedExecutionPayloadBid.Message == nil { + return 0, errors.New("no gloas block") + } + + return v.Gloas.Message.Body.SignedExecutionPayloadBid.Message.BuilderIndex, nil + default: + return 0, errors.New("unknown version") + } +} + +// getBlockExecutionParentHash returns the parent hash from the execution payload of a versioned signed beacon block. 
+func getBlockExecutionParentHash(v *spec.VersionedSignedBeaconBlock) (phase0.Hash32, error) { + switch v.Version { + case spec.DataVersionPhase0: + return phase0.Hash32{}, errors.New("no parent hash in phase0 block") + case spec.DataVersionAltair: + return phase0.Hash32{}, errors.New("no parent hash in altair block") + case spec.DataVersionBellatrix: + if v.Bellatrix == nil || v.Bellatrix.Message == nil || v.Bellatrix.Message.Body == nil || v.Bellatrix.Message.Body.ExecutionPayload == nil { + return phase0.Hash32{}, errors.New("no bellatrix block") + } + + return v.Bellatrix.Message.Body.ExecutionPayload.ParentHash, nil + case spec.DataVersionCapella: + if v.Capella == nil || v.Capella.Message == nil || v.Capella.Message.Body == nil || v.Capella.Message.Body.ExecutionPayload == nil { + return phase0.Hash32{}, errors.New("no capella block") + } + + return v.Capella.Message.Body.ExecutionPayload.ParentHash, nil + case spec.DataVersionDeneb: + if v.Deneb == nil || v.Deneb.Message == nil || v.Deneb.Message.Body == nil || v.Deneb.Message.Body.ExecutionPayload == nil { + return phase0.Hash32{}, errors.New("no deneb block") + } + + return v.Deneb.Message.Body.ExecutionPayload.ParentHash, nil + case spec.DataVersionElectra: + if v.Electra == nil || v.Electra.Message == nil || v.Electra.Message.Body == nil || v.Electra.Message.Body.ExecutionPayload == nil { + return phase0.Hash32{}, errors.New("no electra block") + } + + return v.Electra.Message.Body.ExecutionPayload.ParentHash, nil + case spec.DataVersionGloas: + if v.Gloas == nil || v.Gloas.Message == nil || v.Gloas.Message.Body == nil || v.Gloas.Message.Body.SignedExecutionPayloadBid == nil || v.Gloas.Message.Body.SignedExecutionPayloadBid.Message == nil { + return phase0.Hash32{}, errors.New("no gloas block") + } + + return v.Gloas.Message.Body.SignedExecutionPayloadBid.Message.ParentBlockHash, nil + default: + return phase0.Hash32{}, errors.New("unknown version") + } +} + // getStateRandaoMixes returns the RANDAO 
mixes from a versioned beacon state. func getStateRandaoMixes(v *spec.VersionedBeaconState) ([]phase0.Root, error) { switch v.Version { @@ -252,6 +447,12 @@ func getStateRandaoMixes(v *spec.VersionedBeaconState) ([]phase0.Root, error) { } return v.Fulu.RANDAOMixes, nil + case spec.DataVersionGloas: + if v.Gloas == nil || v.Gloas.RANDAOMixes == nil { + return nil, errors.New("no gloas block") + } + + return v.Gloas.RANDAOMixes, nil default: return nil, errors.New("unknown version") } @@ -274,6 +475,8 @@ func getStateDepositIndex(state *spec.VersionedBeaconState) uint64 { return state.Electra.ETH1DepositIndex case spec.DataVersionFulu: return state.Fulu.ETH1DepositIndex + case spec.DataVersionGloas: + return state.Gloas.ETH1DepositIndex } return 0 } @@ -319,6 +522,12 @@ func getStateCurrentSyncCommittee(v *spec.VersionedBeaconState) ([]phase0.BLSPub } return v.Fulu.CurrentSyncCommittee.Pubkeys, nil + case spec.DataVersionGloas: + if v.Gloas == nil || v.Gloas.CurrentSyncCommittee == nil { + return nil, errors.New("no gloas block") + } + + return v.Gloas.CurrentSyncCommittee.Pubkeys, nil default: return nil, errors.New("unknown version") } @@ -349,6 +558,12 @@ func getStateDepositBalanceToConsume(v *spec.VersionedBeaconState) (phase0.Gwei, } return v.Fulu.DepositBalanceToConsume, nil + case spec.DataVersionGloas: + if v.Gloas == nil { + return 0, errors.New("no gloas block") + } + + return v.Gloas.DepositBalanceToConsume, nil default: return 0, errors.New("unknown version") } @@ -368,17 +583,23 @@ func getStatePendingDeposits(v *spec.VersionedBeaconState) ([]*electra.PendingDe case spec.DataVersionDeneb: return nil, errors.New("no pending deposits in deneb") case spec.DataVersionElectra: - if v.Electra == nil || v.Electra.PendingDeposits == nil { + if v.Electra == nil { return nil, errors.New("no electra block") } return v.Electra.PendingDeposits, nil case spec.DataVersionFulu: - if v.Fulu == nil || v.Fulu.PendingDeposits == nil { + if v.Fulu == nil { return nil, 
errors.New("no fulu block") } return v.Fulu.PendingDeposits, nil + case spec.DataVersionGloas: + if v.Gloas == nil { + return nil, errors.New("no gloas block") + } + + return v.Gloas.PendingDeposits, nil default: return nil, errors.New("unknown version") } @@ -398,17 +619,23 @@ func getStatePendingWithdrawals(v *spec.VersionedBeaconState) ([]*electra.Pendin case spec.DataVersionDeneb: return nil, errors.New("no pending withdrawals in deneb") case spec.DataVersionElectra: - if v.Electra == nil || v.Electra.PendingPartialWithdrawals == nil { + if v.Electra == nil { return nil, errors.New("no electra block") } return v.Electra.PendingPartialWithdrawals, nil case spec.DataVersionFulu: - if v.Fulu == nil || v.Fulu.PendingPartialWithdrawals == nil { + if v.Fulu == nil { return nil, errors.New("no fulu block") } return v.Fulu.PendingPartialWithdrawals, nil + case spec.DataVersionGloas: + if v.Gloas == nil { + return nil, errors.New("no gloas block") + } + + return v.Gloas.PendingPartialWithdrawals, nil default: return nil, errors.New("unknown version") } @@ -428,17 +655,23 @@ func getStatePendingConsolidations(v *spec.VersionedBeaconState) ([]*electra.Pen case spec.DataVersionDeneb: return nil, errors.New("no pending consolidations in deneb") case spec.DataVersionElectra: - if v.Electra == nil || v.Electra.PendingConsolidations == nil { + if v.Electra == nil { return nil, errors.New("no electra block") } return v.Electra.PendingConsolidations, nil case spec.DataVersionFulu: - if v.Fulu == nil || v.Fulu.PendingConsolidations == nil { + if v.Fulu == nil { return nil, errors.New("no fulu block") } return v.Fulu.PendingConsolidations, nil + case spec.DataVersionGloas: + if v.Gloas == nil { + return nil, errors.New("no gloas block") + } + + return v.Gloas.PendingConsolidations, nil default: return nil, errors.New("unknown version") } @@ -460,11 +693,74 @@ func getStateProposerLookahead(v *spec.VersionedBeaconState) ([]phase0.Validator case spec.DataVersionElectra: return nil, 
errors.New("no proposer lookahead in electra") case spec.DataVersionFulu: - if v.Fulu == nil || v.Fulu.ProposerLookahead == nil { + if v.Fulu == nil { return nil, errors.New("no fulu block") } return v.Fulu.ProposerLookahead, nil + case spec.DataVersionGloas: + if v.Gloas == nil { + return nil, errors.New("no gloas block") + } + + return v.Gloas.ProposerLookahead, nil + default: + return nil, errors.New("unknown version") + } +} + +// getStateBlockRoots returns the block roots from a versioned beacon state. +func getStateBlockRoots(v *spec.VersionedBeaconState) ([]phase0.Root, error) { + switch v.Version { + + case spec.DataVersionPhase0: + if v.Phase0 == nil || v.Phase0.BlockRoots == nil { + return nil, errors.New("no phase0 block") + } + + return v.Phase0.BlockRoots, nil + case spec.DataVersionAltair: + if v.Altair == nil || v.Altair.BlockRoots == nil { + return nil, errors.New("no altair block") + } + + return v.Altair.BlockRoots, nil + case spec.DataVersionBellatrix: + if v.Bellatrix == nil || v.Bellatrix.BlockRoots == nil { + return nil, errors.New("no bellatrix block") + } + + return v.Bellatrix.BlockRoots, nil + case spec.DataVersionCapella: + if v.Capella == nil || v.Capella.BlockRoots == nil { + return nil, errors.New("no capella block") + } + + return v.Capella.BlockRoots, nil + case spec.DataVersionDeneb: + if v.Deneb == nil || v.Deneb.BlockRoots == nil { + return nil, errors.New("no deneb block") + } + + return v.Deneb.BlockRoots, nil + case spec.DataVersionElectra: + if v.Electra == nil || v.Electra.BlockRoots == nil { + return nil, errors.New("no electra block") + } + + return v.Electra.BlockRoots, nil + case spec.DataVersionFulu: + if v.Fulu == nil || v.Fulu.BlockRoots == nil { + return nil, errors.New("no fulu block") + } + + return v.Fulu.BlockRoots, nil + case spec.DataVersionGloas: + if v.Gloas == nil || v.Gloas.BlockRoots == nil { + return nil, errors.New("no gloas block") + } + + return v.Gloas.BlockRoots, nil default: return nil, 
errors.New("unknown version") } @@ -487,6 +783,8 @@ func getBlockSize(dynSsz *dynssz.DynSsz, block *spec.VersionedSignedBeaconBlock) return dynSsz.SizeSSZ(block.Electra) case spec.DataVersionFulu: return dynSsz.SizeSSZ(block.Fulu) + case spec.DataVersionGloas: + return dynSsz.SizeSSZ(block.Gloas) default: return 0, errors.New("unknown version") } diff --git a/indexer/beacon/buildercache.go b/indexer/beacon/buildercache.go new file mode 100644 index 000000000..14d506aa9 --- /dev/null +++ b/indexer/beacon/buildercache.go @@ -0,0 +1,739 @@ +package beacon + +import ( + "bytes" + "fmt" + "hash/crc64" + "math" + "runtime/debug" + "sync" + "time" + + "github.com/attestantio/go-eth2-client/spec/gloas" + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/jmoiron/sqlx" + + "github.com/ethpandaops/dora/db" + "github.com/ethpandaops/dora/dbtypes" +) + +// BuilderIndexFlag separates builder indices from validator indices in the pubkey cache +const BuilderIndexFlag = uint64(1 << 40) + +// Builder status flag constants representing different builder states +const ( + BuilderStatusExited uint16 = 1 << iota // Builder has exited (withdrawable_epoch reached) + BuilderStatusSuperseded // Builder index was reused, this pubkey is no longer active +) + +// builderCache manages the in-memory cache of builder states and handles updates +type builderCache struct { + indexer *Indexer + builderSetCache []*builderEntry + cacheMutex sync.RWMutex + triggerDbUpdate chan bool +} + +// builderEntry represents a single builder's state in the cache +type builderEntry struct { + builderDiffs []*builderDiff + finalChecksum uint64 + finalBuilder *gloas.Builder + activeData *BuilderData + statusFlags uint16 +} + +// BuilderData contains the essential builder state information for active builders. +// Only WithdrawableEpoch can change during a builder's lifetime; all other fields are static. 
+type BuilderData struct { + WithdrawableEpoch phase0.Epoch +} + +// builderDiff represents an updated builder entry in the builder set cache. +type builderDiff struct { + epoch phase0.Epoch + dependentRoot phase0.Root + builder *gloas.Builder +} + +// newBuilderCache initializes a new builder cache instance and starts the persist loop +func newBuilderCache(indexer *Indexer) *builderCache { + cache := &builderCache{ + indexer: indexer, + triggerDbUpdate: make(chan bool, 1), + } + + go cache.runPersistLoop() + + return cache +} + +// updateBuilderSet processes builder set updates and maintains the cache state +func (cache *builderCache) updateBuilderSet(slot phase0.Slot, dependentRoot phase0.Root, builders []*gloas.Builder) { + chainState := cache.indexer.consensusPool.GetChainState() + epoch := chainState.EpochOfSlot(slot) + currentEpoch := chainState.CurrentEpoch() + finalizedEpoch, finalizedRoot := chainState.GetFinalizedCheckpoint() + cutOffEpoch := phase0.Epoch(0) + if currentEpoch > phase0.Epoch(cache.indexer.inMemoryEpochs) { + cutOffEpoch = currentEpoch - phase0.Epoch(cache.indexer.inMemoryEpochs) + } + if cutOffEpoch > finalizedEpoch { + cutOffEpoch = finalizedEpoch + } + + if epoch < cutOffEpoch { + cache.indexer.logger.Infof("ignoring old builder set update for epoch %d", epoch) + return + } + + isFinalizedBuilderSet := false + if slot == 0 { + isFinalizedBuilderSet = true // genesis + } else if epoch <= finalizedEpoch { + finalizedBlock := cache.indexer.blockCache.getBlockByRoot(finalizedRoot) + if finalizedBlock != nil { + finalizedDependentBlock := cache.indexer.blockCache.getDependentBlock(chainState, finalizedBlock, nil) + if finalizedDependentBlock != nil && bytes.Equal(finalizedDependentBlock.Root[:], dependentRoot[:]) { + isFinalizedBuilderSet = true + } + } + } + + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + + t1 := time.Now() + + if len(cache.builderSetCache) < len(builders) { + if len(builders) > cap(cache.builderSetCache) { + 
newCache := make([]*builderEntry, len(builders), len(builders)+1000) + copy(newCache, cache.builderSetCache) + cache.builderSetCache = newCache + } else { + cache.builderSetCache = cache.builderSetCache[:len(builders)] + } + } + + isParentMap := map[phase0.Root]bool{} + isAheadMap := map[phase0.Root]bool{} + updatedCount := uint64(0) + + for i := range builders { + var parentChecksum uint64 + var parentBuilder *gloas.Builder + parentEpoch := phase0.Epoch(0) + + aheadDiffIdx := 0 + foundAhead := false + aheadEpoch := phase0.Epoch(math.MaxInt64) + + cachedBuilder := cache.builderSetCache[i] + if cachedBuilder == nil { + cachedBuilder = &builderEntry{} + cache.builderSetCache[i] = cachedBuilder + + cache.indexer.pubkeyCache.Add(builders[i].PublicKey, phase0.ValidatorIndex(uint64(i)|BuilderIndexFlag)) + } else { + parentBuilder = cachedBuilder.finalBuilder + parentChecksum = cachedBuilder.finalChecksum + } + + deleteKeys := []int{} + + if !isFinalizedBuilderSet { + for diffkey, diff := range cachedBuilder.builderDiffs { + if diff.epoch < cutOffEpoch { + deleteKeys = append(deleteKeys, diffkey) + continue + } + + if diff.epoch < epoch { + isParent, checkedParent := isParentMap[diff.dependentRoot] + if !checkedParent { + isParent = cache.indexer.blockCache.isCanonicalBlock(diff.dependentRoot, dependentRoot) + isParentMap[diff.dependentRoot] = isParent + } + + if isParent && diff.epoch > parentEpoch { + parentBuilder = diff.builder + parentEpoch = diff.epoch + } + } + + if diff.epoch > epoch { + isAhead, checkedAhead := isAheadMap[diff.dependentRoot] + if !checkedAhead { + isAhead = cache.indexer.blockCache.isCanonicalBlock(dependentRoot, diff.dependentRoot) + isAheadMap[diff.dependentRoot] = isAhead + } + + if isAhead && diff.epoch < aheadEpoch { + aheadDiffIdx = diffkey + aheadEpoch = diff.epoch + foundAhead = true + } + } + } + + if parentBuilder != nil { + parentChecksum = calculateBuilderChecksum(parentBuilder) + } + } + + checksum := 
calculateBuilderChecksum(builders[i]) + if checksum == parentChecksum { + continue + } + + if isFinalizedBuilderSet { + cachedBuilder.finalBuilder = builders[i] + cachedBuilder.finalChecksum = checksum + cachedBuilder.statusFlags = GetBuilderStatusFlags(builders[i]) + updatedCount++ + + activeData := &BuilderData{ + WithdrawableEpoch: builders[i].WithdrawableEpoch, + } + if cache.isActiveBuilder(activeData) { + cachedBuilder.activeData = activeData + } + } + + if foundAhead && cache.checkBuilderEqual(cachedBuilder.builderDiffs[aheadDiffIdx].builder, builders[i]) { + if isFinalizedBuilderSet { + deleteKeys = append(deleteKeys, aheadDiffIdx) + } else { + diff := cachedBuilder.builderDiffs[aheadDiffIdx] + diff.epoch = epoch + diff.dependentRoot = dependentRoot + cachedBuilder.builderDiffs[aheadDiffIdx] = diff + } + } else if isFinalizedBuilderSet { + } else if len(deleteKeys) == 0 { + cachedBuilder.builderDiffs = append(cachedBuilder.builderDiffs, &builderDiff{ + epoch: epoch, + dependentRoot: dependentRoot, + builder: builders[i], + }) + } else { + cachedBuilder.builderDiffs[deleteKeys[0]] = &builderDiff{ + epoch: epoch, + dependentRoot: dependentRoot, + builder: builders[i], + } + deleteKeys = deleteKeys[1:] + } + + if len(deleteKeys) > 0 { + lastIdx := len(cachedBuilder.builderDiffs) - 1 + delLen := len(deleteKeys) + for delIdx := 0; delIdx < delLen; delIdx++ { + for delLen > 0 && deleteKeys[delLen-1] == lastIdx { + lastIdx-- + delLen-- + } + if delLen == 0 { + break + } + cachedBuilder.builderDiffs[deleteKeys[delIdx]] = cachedBuilder.builderDiffs[lastIdx] + lastIdx-- + } + + cachedBuilder.builderDiffs = cachedBuilder.builderDiffs[:lastIdx+1] + } + } + + if updatedCount > 0 { + select { + case cache.triggerDbUpdate <- true: + default: + } + } + + isFinalizedStr := "" + if isFinalizedBuilderSet { + isFinalizedStr = "finalized " + } + cache.indexer.logger.Infof("processed %vbuilder set update for epoch %d in %v", isFinalizedStr, epoch, time.Since(t1)) +} + +// 
checkBuilderEqual compares two builder states for equality +func (cache *builderCache) checkBuilderEqual(builder1 *gloas.Builder, builder2 *gloas.Builder) bool { + if builder1 == nil && builder2 == nil { + return true + } + if builder1 == nil || builder2 == nil { + return false + } + return bytes.Equal(builder1.PublicKey[:], builder2.PublicKey[:]) && + builder1.Version == builder2.Version && + bytes.Equal(builder1.ExecutionAddress[:], builder2.ExecutionAddress[:]) && + builder1.DepositEpoch == builder2.DepositEpoch && + builder1.WithdrawableEpoch == builder2.WithdrawableEpoch +} + +// GetBuilderStatusFlags calculates the status flags for a builder +func GetBuilderStatusFlags(builder *gloas.Builder) uint16 { + flags := uint16(0) + if builder.WithdrawableEpoch != FarFutureEpoch { + flags |= BuilderStatusExited + } + return flags +} + +// getBuilderSetSize returns the current number of builders in the builder set +func (cache *builderCache) getBuilderSetSize() uint64 { + cache.cacheMutex.RLock() + defer cache.cacheMutex.RUnlock() + + return uint64(len(cache.builderSetCache)) +} + +// setFinalizedEpoch updates the builder cache when a new epoch is finalized +func (cache *builderCache) setFinalizedEpoch(epoch phase0.Epoch, nextEpochDependentRoot phase0.Root) { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + + updatedCount := uint64(0) + + for _, cachedBuilder := range cache.builderSetCache { + if cachedBuilder == nil { + continue + } + + // Find the finalized builder state + for _, diff := range cachedBuilder.builderDiffs { + if diff.dependentRoot == nextEpochDependentRoot { + cachedBuilder.finalBuilder = diff.builder + cachedBuilder.finalChecksum = calculateBuilderChecksum(diff.builder) + cachedBuilder.statusFlags = GetBuilderStatusFlags(diff.builder) + updatedCount++ + + cachedBuilder.activeData = &BuilderData{ + WithdrawableEpoch: diff.builder.WithdrawableEpoch, + } + break + } + } + + // Clean up old diffs + newDiffs := make([]*builderDiff, 0) + for _, 
diff := range cachedBuilder.builderDiffs { + if diff.epoch > epoch { + newDiffs = append(newDiffs, diff) + } + } + cachedBuilder.builderDiffs = newDiffs + + // Clear old active data + if cachedBuilder.activeData != nil { + if !cache.isActiveBuilder(cachedBuilder.activeData) { + cachedBuilder.activeData = nil + } + } + } + + if updatedCount > 0 { + select { + case cache.triggerDbUpdate <- true: + default: + } + } +} + +// BuilderSetStreamer is a callback for streaming builder data +type BuilderSetStreamer func(index gloas.BuilderIndex, flags uint16, activeData *BuilderData, builder *gloas.Builder) error + +// streamBuilderSetForRoot streams the builder set for a given blockRoot +func (cache *builderCache) streamBuilderSetForRoot(blockRoot phase0.Root, onlyActive bool, epoch *phase0.Epoch, cb BuilderSetStreamer) error { + cache.cacheMutex.RLock() + defer cache.cacheMutex.RUnlock() + + isParentMap := map[phase0.Root]bool{} + isAheadMap := map[phase0.Root]bool{} + + for index, cachedBuilder := range cache.builderSetCache { + if cachedBuilder == nil { + continue + } + + latestBuilder := cachedBuilder.finalBuilder + builderData := cachedBuilder.activeData + builderEpoch := phase0.Epoch(0) + + var aheadBuilder *gloas.Builder + aheadEpoch := phase0.Epoch(math.MaxInt64) + + for _, diff := range cachedBuilder.builderDiffs { + isParent, checkedParent := isParentMap[diff.dependentRoot] + if !checkedParent { + isParent = cache.indexer.blockCache.isCanonicalBlock(diff.dependentRoot, blockRoot) + isParentMap[diff.dependentRoot] = isParent + } + + if isParent && diff.epoch >= builderEpoch { + builderData = &BuilderData{ + WithdrawableEpoch: diff.builder.WithdrawableEpoch, + } + builderEpoch = diff.epoch + latestBuilder = diff.builder + } + + if !isParent && builderData == nil { + isAhead, checkedAhead := isAheadMap[diff.dependentRoot] + if !checkedAhead { + isAhead = cache.indexer.blockCache.isCanonicalBlock(blockRoot, diff.dependentRoot) + isAheadMap[diff.dependentRoot] = isAhead 
+ } + + if isAhead && diff.epoch < aheadEpoch { + aheadBuilder = diff.builder + aheadEpoch = diff.epoch + } + } + } + + if builderData == nil && aheadBuilder != nil { + builderData = &BuilderData{ + WithdrawableEpoch: aheadBuilder.WithdrawableEpoch, + } + latestBuilder = aheadBuilder + } + + if onlyActive && (builderData == nil || (epoch != nil && builderData.WithdrawableEpoch <= *epoch)) { + continue + } + + builderFlags := cachedBuilder.statusFlags + if latestBuilder != nil { + builderFlags = GetBuilderStatusFlags(latestBuilder) + } + + err := cb(gloas.BuilderIndex(index), builderFlags, builderData, latestBuilder) + if err != nil { + return err + } + } + + return nil +} + +// UnwrapDbBuilder converts a dbtypes.Builder to a gloas.Builder +func UnwrapDbBuilder(dbBuilder *dbtypes.Builder) *gloas.Builder { + builder := &gloas.Builder{ + Version: dbBuilder.Version, + Balance: 0, // Balance not persisted + DepositEpoch: phase0.Epoch(db.ConvertInt64ToUint64(dbBuilder.DepositEpoch)), + WithdrawableEpoch: phase0.Epoch(db.ConvertInt64ToUint64(dbBuilder.WithdrawableEpoch)), + } + copy(builder.PublicKey[:], dbBuilder.Pubkey) + copy(builder.ExecutionAddress[:], dbBuilder.ExecutionAddress) + return builder +} + +// isActiveBuilder determines if a builder is currently active +func (cache *builderCache) isActiveBuilder(builder *BuilderData) bool { + currentEpoch := cache.indexer.consensusPool.GetChainState().CurrentEpoch() + cutOffEpoch := phase0.Epoch(0) + if currentEpoch > 10 { + cutOffEpoch = currentEpoch - 10 + } + + return builder.WithdrawableEpoch > cutOffEpoch +} + +// getBuilderByIndex returns the builder by index for a given forkId +func (cache *builderCache) getBuilderByIndex(index gloas.BuilderIndex, overrideForkId *ForkKey) *gloas.Builder { + canonicalHead := cache.indexer.GetCanonicalHead(overrideForkId) + if canonicalHead == nil { + return nil + } + + return cache.getBuilderByIndexAndRoot(index, canonicalHead.Root) +} + +// getBuilderByIndexAndRoot returns the 
builder by index for a given blockRoot +func (cache *builderCache) getBuilderByIndexAndRoot(index gloas.BuilderIndex, blockRoot phase0.Root) *gloas.Builder { + cache.cacheMutex.RLock() + defer cache.cacheMutex.RUnlock() + + if uint64(index) >= uint64(len(cache.builderSetCache)) { + return nil + } + + cachedBuilder := cache.builderSetCache[index] + if cachedBuilder == nil { + return nil + } + + builder := cachedBuilder.finalBuilder + builderEpoch := phase0.Epoch(0) + + // Find the latest valid diff + for _, diff := range cachedBuilder.builderDiffs { + if cache.indexer.blockCache.isCanonicalBlock(diff.dependentRoot, blockRoot) && diff.epoch >= builderEpoch { + builder = diff.builder + builderEpoch = diff.epoch + } + } + + // Fallback to db if builder is not found in cache + if builder == nil { + if dbBuilder := db.GetActiveBuilderByIndex(cache.indexer.ctx, uint64(index)); dbBuilder != nil { + builder = UnwrapDbBuilder(dbBuilder) + } + } else { + // Return a copy + builder = &gloas.Builder{ + PublicKey: builder.PublicKey, + Version: builder.Version, + ExecutionAddress: builder.ExecutionAddress, + Balance: builder.Balance, + DepositEpoch: builder.DepositEpoch, + WithdrawableEpoch: builder.WithdrawableEpoch, + } + } + + return builder +} + +// calculateBuilderChecksum generates a CRC64 checksum of all builder fields (except balance) +func calculateBuilderChecksum(b *gloas.Builder) uint64 { + if b == nil { + return 0 + } + + data := make([]byte, 0, 80) + data = append(data, b.PublicKey[:]...) + data = append(data, b.Version) + data = append(data, b.ExecutionAddress[:]...) + data = append(data, uint64ToBytes(uint64(b.DepositEpoch))...) + data = append(data, uint64ToBytes(uint64(b.WithdrawableEpoch))...) 
+ + return crc64.Checksum(data, crc64Table) +} + +// prepopulateFromDB pre-populates the builder set cache from the database +func (cache *builderCache) prepopulateFromDB() (uint64, error) { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + + maxIndex, err := db.GetMaxBuilderIndex(cache.indexer.ctx) + if err != nil { + return 0, fmt.Errorf("error getting max builder index: %w", err) + } + + if maxIndex == 0 { + return 0, nil + } + + cache.builderSetCache = make([]*builderEntry, maxIndex+1, maxIndex+1+1000) + + restoreCount := uint64(0) + + batchSize := uint64(10000) + for start := uint64(0); start <= maxIndex; start += batchSize { + end := min(start+batchSize, maxIndex) + + builders := db.GetBuilderRange(cache.indexer.ctx, start, end) + for _, dbBuilder := range builders { + if dbBuilder.Superseded { + continue + } + + builder := UnwrapDbBuilder(dbBuilder) + builderEntry := &builderEntry{ + finalChecksum: calculateBuilderChecksum(builder), + } + builderData := &BuilderData{ + WithdrawableEpoch: phase0.Epoch(db.ConvertInt64ToUint64(dbBuilder.WithdrawableEpoch)), + } + if cache.isActiveBuilder(builderData) { + builderEntry.activeData = builderData + } + builderEntry.statusFlags = GetBuilderStatusFlags(builder) + + cache.builderSetCache[dbBuilder.BuilderIndex] = builderEntry + + cache.indexer.pubkeyCache.Add(builder.PublicKey, phase0.ValidatorIndex(dbBuilder.BuilderIndex|BuilderIndexFlag)) + + restoreCount++ + } + } + + return restoreCount, nil +} + +// runPersistLoop handles the background persistence of builder states to the database +func (cache *builderCache) runPersistLoop() { + defer func() { + if err := recover(); err != nil { + cache.indexer.logger.WithError(err.(error)).Errorf( + "uncaught panic in indexer.beacon.builderCache.runPersistLoop subroutine: %v, stack: %v", + err, string(debug.Stack())) + time.Sleep(10 * time.Second) + + go cache.runPersistLoop() + } + }() + + for range cache.triggerDbUpdate { + time.Sleep(2 * time.Second) + err := 
db.RunDBTransaction(func(tx *sqlx.Tx) error { + hasMore, err := cache.persistBuilders(tx) + if hasMore { + select { + case cache.triggerDbUpdate <- true: + default: + } + } + return err + }) + if err != nil { + cache.indexer.logger.WithError(err).Errorf("error persisting builders") + } + } +} + +// persistBuilders writes a batch of builder states to the database +func (cache *builderCache) persistBuilders(tx *sqlx.Tx) (bool, error) { + cache.cacheMutex.RLock() + defer cache.cacheMutex.RUnlock() + + const batchSize = 1000 + const maxPerRun = 10000 + + batch := make([]*dbtypes.Builder, 0, batchSize) + batchIndices := make([]uint64, 0, batchSize) + supersededPubkeys := make([][]byte, 0) + persisted := 0 + firstIndex := uint64(0) + lastIndex := uint64(0) + hasMore := false + + for index, entry := range cache.builderSetCache { + if entry == nil || entry.finalBuilder == nil { + continue + } + + if persisted == 0 && len(batch) == 0 { + firstIndex = uint64(index) + } + lastIndex = uint64(index) + + dbBuilder := &dbtypes.Builder{ + Pubkey: entry.finalBuilder.PublicKey[:], + BuilderIndex: uint64(index), + Version: entry.finalBuilder.Version, + ExecutionAddress: entry.finalBuilder.ExecutionAddress[:], + DepositEpoch: db.ConvertUint64ToInt64(uint64(entry.finalBuilder.DepositEpoch)), + WithdrawableEpoch: db.ConvertUint64ToInt64(uint64(entry.finalBuilder.WithdrawableEpoch)), + Superseded: false, + } + + batch = append(batch, dbBuilder) + batchIndices = append(batchIndices, uint64(index)) + + if len(batch) >= batchSize { + superseded, err := cache.persistBuilderBatch(tx, batch, batchIndices) + if err != nil { + return false, err + } + supersededPubkeys = append(supersededPubkeys, superseded...) 
+ + // Clear finalBuilder for persisted entries + for _, idx := range batchIndices { + if cache.builderSetCache[idx] != nil { + cache.builderSetCache[idx].finalBuilder = nil + } + } + + batch = batch[:0] + batchIndices = batchIndices[:0] + persisted += batchSize + + if persisted >= maxPerRun { + hasMore = true + break + } + } + } + + // Persist remaining batch + if len(batch) > 0 { + superseded, err := cache.persistBuilderBatch(tx, batch, batchIndices) + if err != nil { + return false, err + } + supersededPubkeys = append(supersededPubkeys, superseded...) + + // Clear finalBuilder for persisted entries + for _, idx := range batchIndices { + if cache.builderSetCache[idx] != nil { + cache.builderSetCache[idx].finalBuilder = nil + } + } + + persisted += len(batch) + } + + // Batch mark superseded builders + if len(supersededPubkeys) > 0 { + err := db.SetBuildersSuperseded(supersededPubkeys, tx) + if err != nil { + return false, fmt.Errorf("error marking builders as superseded: %w", err) + } + } + + if persisted > 0 || len(supersededPubkeys) > 0 { + cache.indexer.logger.Infof("persisted %d builders to db [%d-%d], marked %d as superseded", + persisted, firstIndex, lastIndex, len(supersededPubkeys)) + } + + return hasMore, nil +} + +// persistBuilderBatch persists a batch of builders and returns pubkeys that were superseded +func (cache *builderCache) persistBuilderBatch(tx *sqlx.Tx, batch []*dbtypes.Builder, indices []uint64) ([][]byte, error) { + if len(batch) == 0 { + return nil, nil + } + + // Get range for this batch + minIndex := indices[0] + maxIndex := indices[0] + for _, idx := range indices[1:] { + if idx < minIndex { + minIndex = idx + } + if idx > maxIndex { + maxIndex = idx + } + } + + // Fetch existing builders in this batch's range + existingBuilders := db.GetBuilderRange(cache.indexer.ctx, minIndex, maxIndex) + existingByIndex := make(map[uint64]*dbtypes.Builder, len(existingBuilders)) + for _, b := range existingBuilders { + 
existingByIndex[b.BuilderIndex] = b + } + + // Find superseded pubkeys + supersededPubkeys := make([][]byte, 0) + for i, dbBuilder := range batch { + if existing, ok := existingByIndex[indices[i]]; ok { + if !bytes.Equal(existing.Pubkey, dbBuilder.Pubkey) { + supersededPubkeys = append(supersededPubkeys, existing.Pubkey) + } + } + } + + // Insert batch + err := db.InsertBuilderBatch(batch, tx) + if err != nil { + return nil, fmt.Errorf("error persisting builder batch: %w", err) + } + + return supersededPubkeys, nil +} diff --git a/indexer/beacon/client.go b/indexer/beacon/client.go index 7176d925a..42651be56 100644 --- a/indexer/beacon/client.go +++ b/indexer/beacon/client.go @@ -10,6 +10,7 @@ import ( v1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethereum/go-ethereum/common" "github.com/ethpandaops/dora/clients/consensus" @@ -33,7 +34,9 @@ type Client struct { archive bool skipValidators bool - streamSubscription *utils.Subscription[*rpc.BeaconStreamEvent] + streamSubscription *utils.Subscription[*rpc.BeaconStreamEvent] + executionPayloadSubscription *utils.Subscription[*v1.ExecutionPayloadAvailableEvent] + executionPayloadBidSubscription *utils.Subscription[*gloas.SignedExecutionPayloadBid] headRoot phase0.Root } @@ -80,6 +83,8 @@ func (c *Client) startIndexing() { // single ordered subscription for block & head events to preserve SSE ordering c.streamSubscription = c.client.SubscribeStreamEvent(100, true) + c.executionPayloadSubscription = c.client.SubscribeExecutionPayloadAvailableEvent(100, true) + c.executionPayloadBidSubscription = c.client.SubscribeExecutionPayloadBidEvent(100, true) go c.startClientLoop() } @@ -144,7 +149,7 @@ func (c *Client) runClientLoop() error { c.headRoot = headRoot - headBlock, isNew, processingTimes, err := c.processBlock(headSlot, headRoot, nil, false) + headBlock, 
isNew, processingTimes, err := c.processBlock(headSlot, headRoot, nil, false, true) if err != nil { return fmt.Errorf("failed processing head block: %v", err) } @@ -185,6 +190,16 @@ func (c *Client) runClientLoop() error { headEvent.Slot, headEvent.Block.String(), err) } } + case executionPayloadEvent := <-c.executionPayloadSubscription.Channel(): + err := c.processExecutionPayloadAvailableEvent(executionPayloadEvent) + if err != nil { + c.logger.Errorf("failed processing execution payload %v (%v): %v", executionPayloadEvent.Slot, executionPayloadEvent.BlockRoot.String(), err) + } + case executionPayloadBidEvent := <-c.executionPayloadBidSubscription.Channel(): + err := c.processExecutionPayloadBidEvent(executionPayloadBidEvent) + if err != nil { + c.logger.Errorf("failed processing execution payload bid %v (%v): %v", executionPayloadBidEvent.Message.Slot, executionPayloadBidEvent.Message.ParentBlockRoot.String(), err) + } } } @@ -245,50 +260,54 @@ func (c *Client) processHeadEvent(headEvent *v1.HeadEvent) error { chainState := c.client.GetPool().GetChainState() dependentRoot := headEvent.CurrentDutyDependentRoot - - var dependentBlock *Block if !bytes.Equal(dependentRoot[:], consensus.NullRoot[:]) { block.dependentRoot = &dependentRoot - - dependentBlock = c.indexer.blockCache.getBlockByRoot(dependentRoot) - if dependentBlock == nil { - c.logger.Warnf("dependent block (%v) not found after backfilling", dependentRoot.String()) - } - } else { - dependentBlock = c.indexer.blockCache.getDependentBlock(chainState, block, c) } // walk back the chain of epoch stats to ensure we have all duties & epoch specific data for the clients chain currentBlock := block - currentEpoch := chainState.EpochOfSlot(currentBlock.Slot) + headEpoch := chainState.EpochOfSlot(currentBlock.Slot) + currentEpoch := headEpoch minInMemorySlot := c.indexer.getMinInMemorySlot() absoluteMinInMemoryEpoch := c.indexer.getAbsoluteMinInMemoryEpoch() for { - if dependentBlock != nil && currentBlock.Slot 
>= minInMemorySlot { - epoch := chainState.EpochOfSlot(currentBlock.Slot) + parentRoot := currentBlock.GetParentRoot() + if parentRoot == nil { + break + } + + isEpochStart := false + parentBlock := c.indexer.blockCache.getBlockByRoot(*parentRoot) - // only request state for epochs that are allowed in memory by configuration - // we accept some gaps here, these will be fixed by the pruning/finalization process - requestState := epoch >= absoluteMinInMemoryEpoch + if currentBlock.Slot == 0 { + isEpochStart = true + } else if currentBlock.dependentRoot != nil && *parentRoot == *currentBlock.dependentRoot && (parentBlock == nil || parentBlock.Slot > 0) { + isEpochStart = true + } else if parentBlock != nil && chainState.EpochOfSlot(parentBlock.Slot) < currentEpoch { + isEpochStart = true + } + + if isEpochStart { + epoch := chainState.EpochOfSlot(currentBlock.Slot) + dependentRoot := *parentRoot // ensure epoch stats for the epoch - epochStats := c.indexer.epochCache.createOrGetEpochStats(epoch, dependentBlock.Root, requestState) + epochStats := c.indexer.epochCache.createOrGetEpochStats(epoch, dependentRoot) + + if epoch >= absoluteMinInMemoryEpoch { + c.indexer.epochCache.ensureEpochDependentState(epochStats, currentBlock.Root) + } if !epochStats.addRequestedBy(c) { break } - if epochStats.dependentState == nil && epoch == currentEpoch { - // always load most recent dependent state to ensure we have the latest validator set - c.indexer.epochCache.addEpochStateRequest(epochStats) - } - } else { - if dependentBlock == nil { - c.logger.Debugf("epoch stats check failed: dependent block for %v:%v (%v) not found", currentBlock.Slot, chainState.EpochOfSlot(currentBlock.Slot), currentBlock.Root.String()) - } + } + + if parentBlock == nil || parentBlock.Slot < minInMemorySlot { break } - currentBlock = dependentBlock - dependentBlock = c.indexer.blockCache.getDependentBlock(chainState, currentBlock, c) + currentBlock = parentBlock + currentEpoch = 
chainState.EpochOfSlot(currentBlock.Slot) } c.headRoot = block.Root @@ -297,7 +316,7 @@ func (c *Client) processHeadEvent(headEvent *v1.HeadEvent) error { // processStreamBlock processes a block received from the stream (either via block or head events). func (c *Client) processStreamBlock(slot phase0.Slot, root phase0.Root) (*Block, error) { - block, isNew, processingTimes, err := c.processBlock(slot, root, nil, true) + block, isNew, processingTimes, err := c.processBlock(slot, root, nil, true, false) if err != nil { return nil, err } @@ -351,7 +370,7 @@ func (c *Client) processReorg(oldHead *Block, newHead *Block) error { } // processBlock processes a block (from stream & polling). -func (c *Client) processBlock(slot phase0.Slot, root phase0.Root, header *phase0.SignedBeaconBlockHeader, trackRecvDelay bool) (block *Block, isNew bool, processingTimes []time.Duration, err error) { +func (c *Client) processBlock(slot phase0.Slot, root phase0.Root, header *phase0.SignedBeaconBlockHeader, trackRecvDelay bool, loadPayload bool) (block *Block, isNew bool, processingTimes []time.Duration, err error) { chainState := c.client.GetPool().GetChainState() finalizedSlot := chainState.GetFinalizedSlot() processingTimes = make([]time.Duration, 3) @@ -409,6 +428,25 @@ func (c *Client) processBlock(slot phase0.Slot, root phase0.Root, header *phase0 return } + if loadPayload { + newPayload, _ := block.EnsureExecutionPayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) { + t1 := time.Now() + defer func() { + processingTimes[0] += time.Since(t1) + }() + + return LoadExecutionPayload(c.getContext(), c, root) + }) + + if !isNew && newPayload { + // write payload to db + err = c.persistExecutionPayload(block) + if err != nil { + return + } + } + } + if slot >= finalizedSlot && isNew { c.indexer.blockCache.addBlockToParentMap(block) c.indexer.blockCache.addBlockToExecBlockMap(block) @@ -532,7 +570,7 @@ func (c *Client) backfillParentBlocks(headBlock *Block) error { if parentBlock 
== nil { var err error - parentBlock, isNewBlock, processingTimes, err = c.processBlock(parentSlot, parentRoot, parentHead, false) + parentBlock, isNewBlock, processingTimes, err = c.processBlock(parentSlot, parentRoot, parentHead, false, true) if err != nil { return fmt.Errorf("could not process block [0x%x]: %v", parentRoot, err) } @@ -559,3 +597,87 @@ func (c *Client) backfillParentBlocks(headBlock *Block) error { } return nil } + +// processExecutionPayloadAvailableEvent processes an execution payload available event from the event stream. +func (c *Client) processExecutionPayloadAvailableEvent(executionPayloadEvent *v1.ExecutionPayloadAvailableEvent) error { + if c.client.GetStatus() != consensus.ClientStatusOnline && c.client.GetStatus() != consensus.ClientStatusOptimistic { + // client is not ready, skip + return nil + } + + chainState := c.client.GetPool().GetChainState() + finalizedSlot := chainState.GetFinalizedSlot() + + var block *Block + + if executionPayloadEvent.Slot < finalizedSlot { + // block is in finalized epoch + // known block or a new orphaned block + + // don't add to cache, process this block right after loading the details + block = newBlock(c.indexer.dynSsz, executionPayloadEvent.BlockRoot, executionPayloadEvent.Slot, 0) + + dbBlockHead := db.GetBlockHeadByRoot(c.getContext(), executionPayloadEvent.BlockRoot[:]) + if dbBlockHead != nil { + block.isInFinalizedDb = true + block.parentRoot = (*phase0.Root)(dbBlockHead.ParentRoot) + } + + } else { + block, _ = c.indexer.blockCache.createOrGetBlock(executionPayloadEvent.BlockRoot, executionPayloadEvent.Slot) + } + + if block == nil { + c.logger.Warnf("execution payload event for unknown block %v:%v [0x%x]", chainState.EpochOfSlot(executionPayloadEvent.Slot), executionPayloadEvent.Slot, executionPayloadEvent.BlockRoot) + return nil + } + + newPayload, err := block.EnsureExecutionPayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) { + return LoadExecutionPayload(c.getContext(), c, 
executionPayloadEvent.BlockRoot) + }) + if err != nil { + return err + } + + if newPayload { + // write payload to db + err = c.persistExecutionPayload(block) + if err != nil { + return err + } + } + + return nil +} + +func (c *Client) persistExecutionPayload(block *Block) error { + payloadVer, payloadSSZ, err := MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, block.executionPayload, c.indexer.blockCompression) + if err != nil { + return fmt.Errorf("marshal execution payload ssz failed: %v", err) + } + + return db.RunDBTransaction(func(tx *sqlx.Tx) error { + err := db.UpdateUnfinalizedBlockPayload(c.getContext(), tx, block.Root[:], payloadVer, payloadSSZ) + if err != nil { + return err + } + + return nil + }) +} + +func (c *Client) processExecutionPayloadBidEvent(executionPayloadBidEvent *gloas.SignedExecutionPayloadBid) error { + bid := &dbtypes.BlockBid{ + ParentRoot: executionPayloadBidEvent.Message.ParentBlockRoot[:], + ParentHash: executionPayloadBidEvent.Message.ParentBlockHash[:], + BlockHash: executionPayloadBidEvent.Message.BlockHash[:], + FeeRecipient: executionPayloadBidEvent.Message.FeeRecipient[:], + GasLimit: uint64(executionPayloadBidEvent.Message.GasLimit), + BuilderIndex: int64(executionPayloadBidEvent.Message.BuilderIndex), + Slot: uint64(executionPayloadBidEvent.Message.Slot), + Value: uint64(executionPayloadBidEvent.Message.Value), + ElPayment: uint64(executionPayloadBidEvent.Message.ExecutionPayment), + } + c.indexer.blockBidCache.AddBid(bid) + return nil +} diff --git a/indexer/beacon/duties/duties.go b/indexer/beacon/duties/duties.go index 9a6ee6402..dd9647edc 100644 --- a/indexer/beacon/duties/duties.go +++ b/indexer/beacon/duties/duties.go @@ -357,3 +357,62 @@ func swapOrNot(buf []byte, byteV byte, i ActiveIndiceIndex, input []ActiveIndice } return byteV, source } + +// GetPtcDuties returns the Payload Timeliness Committee (PTC) members for a given slot. 
+// The PTC is selected from the concatenated attestation committees for the slot using +// balance-weighted selection without shuffling. +func GetPtcDuties( + spec *consensus.ChainSpec, + state *BeaconState, + attesterDuties [][]ActiveIndiceIndex, + slot phase0.Slot, +) ([]ActiveIndiceIndex, error) { + if spec.PtcSize == 0 { + return nil, nil + } + + epoch := phase0.Epoch(slot / phase0.Slot(spec.SlotsPerEpoch)) + + // Derive PTC seed: hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot)) + seedData := []byte{} + seedHash := GetSeed(spec, state, epoch, spec.DomainPtcAttester) + seedData = append(seedData, seedHash[:]...) + seedData = append(seedData, UintToBytes(uint64(slot))...) + seed := Hash(seedData) + + // Concatenate all committee indices for the slot (in order) + indices := make([]ActiveIndiceIndex, 0) + for _, committee := range attesterDuties { + indices = append(indices, committee...) + } + + if len(indices) == 0 { + return nil, errors.New("empty committee indices") + } + + // Balance-weighted selection without shuffling (shuffle_indices=false) + // Uses same acceptance logic as GetProposerIndex (Electra-style 16-bit random values) + maxRandomValue := uint64(1<<16 - 1) + total := uint64(len(indices)) + selected := make([]ActiveIndiceIndex, 0, spec.PtcSize) + + for i := uint64(0); uint64(len(selected)) < spec.PtcSize; i++ { + // No shuffling - traverse indices in order + nextIndex := i % total + candidateIndex := indices[nextIndex] + + // Balance-weighted acceptance check (same as proposer selection) + b := append(seed[:], UintToBytes(i/16)...) 
+ offset := (i % 16) * 2 + hash := Hash(b) + randomValue := BytesToUint(hash[offset : offset+2]) + + effectiveBal := uint64(state.GetEffectiveBalance(candidateIndex)) + + if effectiveBal*maxRandomValue >= spec.MaxEffectiveBalanceElectra*randomValue { + selected = append(selected, candidateIndex) + } + } + + return selected, nil +} diff --git a/indexer/beacon/epochcache.go b/indexer/beacon/epochcache.go index ce62a81b9..b71d7201d 100644 --- a/indexer/beacon/epochcache.go +++ b/indexer/beacon/epochcache.go @@ -65,7 +65,7 @@ func newEpochCache(indexer *Indexer) *epochCache { } // createOrGetEpochStats gets an existing EpochStats entry for the given epoch and dependentRoot or creates a new instance if not found. -func (cache *epochCache) createOrGetEpochStats(epoch phase0.Epoch, dependentRoot phase0.Root, createStateRequest bool) *EpochStats { +func (cache *epochCache) createOrGetEpochStats(epoch phase0.Epoch, dependentRoot phase0.Root) *EpochStats { cache.cacheMutex.Lock() defer cache.cacheMutex.Unlock() @@ -77,43 +77,40 @@ func (cache *epochCache) createOrGetEpochStats(epoch phase0.Epoch, dependentRoot cache.statsMap[statsKey] = epochStats } - // get or create beacon state which the epoch status depends on (dependentRoot beacon state) - epochState := cache.stateMap[dependentRoot] - if epochState == nil && !epochStats.ready && createStateRequest { - epochState = newEpochState(dependentRoot) - cache.stateMap[dependentRoot] = epochState - - cache.indexer.logger.Infof("added epoch state request for epoch %v (%v) to queue", epoch, dependentRoot.String()) - } - - if epochState != nil { - epochStats.dependentState = epochState - - if epochState.loadingStatus == 2 && !epochStats.ready { - // dependent state is already loaded, process it - go epochStats.processState(cache.indexer, nil) - } - } - return epochStats } -func (cache *epochCache) addEpochStateRequest(epochStats *EpochStats) { +func (cache *epochCache) ensureEpochDependentState(epochStats *EpochStats, firstBlockRoot 
phase0.Root) { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + if epochStats.dependentState != nil { return } - cache.cacheMutex.Lock() - defer cache.cacheMutex.Unlock() - + // get or create beacon state which the epoch status depends on (dependentRoot beacon state) epochState := cache.stateMap[epochStats.dependentRoot] - if epochState == nil { - epochState = newEpochState(epochStats.dependentRoot) + if epochState == nil && !epochStats.ready { + stateRoot := epochStats.dependentRoot + chainState := cache.indexer.consensusPool.GetChainState() + if chainState.IsFuluEnabled(epochStats.epoch) { + stateRoot = firstBlockRoot + } + + epochState = newEpochState(stateRoot) cache.stateMap[epochStats.dependentRoot] = epochState cache.indexer.logger.Infof("added epoch state request for epoch %v (%v) to queue", epochStats.epoch, epochStats.dependentRoot.String()) } - epochStats.dependentState = epochState + + if epochState != nil { + epochStats.dependentState = epochState + + if epochState.loadingStatus == 2 && !epochStats.ready { + // dependent state is already loaded, process it + go epochStats.processState(cache.indexer, nil, 0) + } + } } func (cache *epochCache) getEpochStats(epoch phase0.Epoch, dependentRoot phase0.Root) *EpochStats { @@ -468,11 +465,14 @@ func (cache *epochCache) loadEpochStats(epochStats *EpochStats) bool { log.Infof("loading epoch %v stats (dep: %v, req: %v)", epochStats.epoch, epochStats.dependentRoot.String(), len(epochStats.requestedBy)) + t1 := time.Now() state, err := epochStats.dependentState.loadState(client.getContext(), client, cache) if err != nil && epochStats.dependentState.loadingStatus == 0 { client.logger.Warnf("failed loading epoch %v stats (dep: %v): %v", epochStats.epoch, epochStats.dependentRoot.String(), err) } + loadDuration := time.Since(t1) + if epochStats.dependentState.loadingStatus != 2 { // epoch state could not be loaded epochStats.dependentState.retryCount++ @@ -497,7 +497,7 @@ func (cache *epochCache) 
loadEpochStats(epochStats *EpochStats) bool { cache.cacheMutex.Unlock() for _, stats := range dependentStats { - go stats.processState(cache.indexer, validatorSet) + go stats.processState(cache.indexer, validatorSet, loadDuration) } return true diff --git a/indexer/beacon/epochstate.go b/indexer/beacon/epochstate.go index 4d696f480..53cd35f47 100644 --- a/indexer/beacon/epochstate.go +++ b/indexer/beacon/epochstate.go @@ -8,6 +8,7 @@ import ( "github.com/attestantio/go-eth2-client/spec" "github.com/attestantio/go-eth2-client/spec/electra" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" ) @@ -25,6 +26,7 @@ type epochState struct { stateSlot phase0.Slot validatorBalances []phase0.Gwei + builderBalances []phase0.Gwei randaoMixes []phase0.Root depositIndex uint64 syncCommittee []phase0.ValidatorIndex @@ -90,7 +92,6 @@ func (s *epochState) loadState(ctx context.Context, client *Client, cache *epoch } s.loadingStatus = 1 - client.logger.Debugf("loading state for slot %v", s.slotRoot.String()) ctx, cancel := context.WithTimeout(ctx, beaconStateRequestTimeout+(beaconHeaderRequestTimeout*2)) s.loadingCancel = cancel @@ -104,29 +105,48 @@ func (s *epochState) loadState(ctx context.Context, client *Client, cache *epoch } }() - var blockHeader *phase0.SignedBeaconBlockHeader + var beaconBlock *spec.VersionedSignedBeaconBlock block := client.indexer.blockCache.getBlockByRoot(s.slotRoot) if block != nil { - blockHeader = block.AwaitHeader(ctx, beaconHeaderRequestTimeout) + beaconBlock = block.AwaitBlock(ctx, beaconHeaderRequestTimeout) } - if blockHeader == nil { + if beaconBlock == nil { var err error - blockHeader, err = LoadBeaconHeader(ctx, client, s.slotRoot) + beaconBlock, err = LoadBeaconBlock(ctx, client, s.slotRoot) if err != nil { return nil, err } } - s.stateRoot = blockHeader.Message.StateRoot + if beaconBlock != nil { + slot, _ := beaconBlock.Slot() + client.logger.Infof("loading state for block root %v (slot 
%v)", s.slotRoot.String(), slot) - resState, err := LoadBeaconState(ctx, client, blockHeader.Message.StateRoot) + var err error + s.stateRoot, err = beaconBlock.StateRoot() + if err != nil { + return nil, fmt.Errorf("error getting state root from beacon block %v: %v", s.slotRoot.String(), err) + } + } + + resState, err := LoadBeaconState(ctx, client, s.stateRoot) if err != nil { return nil, err } - err = s.processState(resState, cache) + var executionPayload *gloas.SignedExecutionPayloadEnvelope + if beaconBlock != nil && beaconBlock.Version >= spec.DataVersionGloas { + if block != nil { + executionPayload = block.GetExecutionPayload(ctx) + } + if executionPayload == nil { + executionPayload, _ = LoadExecutionPayload(ctx, client, s.slotRoot) + } + } + + err = s.processState(resState, beaconBlock, executionPayload, cache) if err != nil { return nil, err } @@ -144,7 +164,7 @@ func (s *epochState) loadState(ctx context.Context, client *Client, cache *epoch // processState processes the state and updates the epochState instance. // the function extracts and unifies all relevant information from the beacon state, so the full beacon state can be dropped from memory afterwards. 
-func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epochCache) error { +func (s *epochState) processState(state *spec.VersionedBeaconState, beaconBlock *spec.VersionedSignedBeaconBlock, executionPayload *gloas.SignedExecutionPayloadEnvelope, cache *epochCache) error { slot, err := state.Slot() if err != nil { return fmt.Errorf("error getting slot from state %v: %v", s.slotRoot.String(), err) @@ -152,13 +172,38 @@ func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epoch s.stateSlot = slot + dependentRoot := s.slotRoot + if state.Version >= spec.DataVersionFulu { + blockRoots, err := getStateBlockRoots(state) + if err != nil { + return fmt.Errorf("error getting block roots from state %v: %v", s.slotRoot.String(), err) + } + + specs := cache.indexer.consensusPool.GetChainState().GetSpecs() + dependentRoot = blockRoots[slot%phase0.Slot(specs.SlotsPerHistoricalRoot)] + } + validatorList, err := state.Validators() if err != nil { return fmt.Errorf("error getting validators from state %v: %v", s.slotRoot.String(), err) } if cache != nil { - cache.indexer.validatorCache.updateValidatorSet(slot, s.slotRoot, validatorList) + cache.indexer.validatorCache.updateValidatorSet(slot, dependentRoot, validatorList) + } + + // Process builder set for Gloas + if state.Version >= spec.DataVersionGloas && state.Gloas != nil { + if cache != nil { + cache.indexer.builderCache.updateBuilderSet(slot, dependentRoot, state.Gloas.Builders) + } + + // Extract builder balances + builderBalances := make([]phase0.Gwei, len(state.Gloas.Builders)) + for i, builder := range state.Gloas.Builders { + builderBalances[i] = builder.Balance + } + s.builderBalances = builderBalances } validatorPubkeyMap := make(map[phase0.BLSPubKey]phase0.ValidatorIndex) @@ -179,7 +224,33 @@ func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epoch } s.randaoMixes = randaoMixes - s.depositIndex = getStateDepositIndex(state) + + if state.Version >= 
spec.DataVersionFulu { + if state.Version >= spec.DataVersionGloas { + isPostPayload := isGloasPostPayloadState(state, slot) + if isPostPayload && executionPayload != nil && + executionPayload.Message != nil && + executionPayload.Message.ExecutionRequests != nil && + len(executionPayload.Message.ExecutionRequests.Deposits) > 0 { + s.depositIndex = executionPayload.Message.ExecutionRequests.Deposits[0].Index + } else { + s.depositIndex = getStateDepositIndex(state) + } + } else { + blockRequests, err := beaconBlock.ExecutionRequests() + if err != nil { + return fmt.Errorf("error getting execution requests from block %v: %v", + s.slotRoot.String(), err) + } + if len(blockRequests.Deposits) > 0 { + s.depositIndex = blockRequests.Deposits[0].Index + } else { + s.depositIndex = getStateDepositIndex(state) + } + } + } else { + s.depositIndex = getStateDepositIndex(state) + } if state.Version >= spec.DataVersionAltair { currentSyncCommittee, err := getStateCurrentSyncCommittee(state) @@ -232,3 +303,17 @@ func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epoch return nil } + +// isGloasPostPayloadState checks whether the Gloas state is post-payload +// (i.e. execution payload deposits have been applied) for the given slot. 
+func isGloasPostPayloadState(state *spec.VersionedBeaconState, slot phase0.Slot) bool { + if state.Gloas == nil { + return false + } + bitfieldLen := uint64(len(state.Gloas.ExecutionPayloadAvailability)) * 8 + if bitfieldLen == 0 { + return false + } + idx := uint64(slot) % bitfieldLen + return state.Gloas.ExecutionPayloadAvailability[idx/8]&(1<<(idx%8)) != 0 +} diff --git a/indexer/beacon/epochstats.go b/indexer/beacon/epochstats.go index 7f1fbc9cb..234c383dc 100644 --- a/indexer/beacon/epochstats.go +++ b/indexer/beacon/epochstats.go @@ -47,10 +47,11 @@ type EpochStatsValues struct { RandaoMix phase0.Hash32 NextRandaoMix phase0.Hash32 ActiveIndices []phase0.ValidatorIndex - EffectiveBalances []uint32 + EffectiveBalances []uint32 // effective balance in full ETH of last epoch for pre-fulu stats, effective balance in full ETH of current epoch for fulu+ stats ProposerDuties []phase0.ValidatorIndex AttesterDuties [][][]duties.ActiveIndiceIndex SyncCommitteeDuties []phase0.ValidatorIndex + PtcDuties [][]duties.ActiveIndiceIndex // [slot_index][ptc_member_index] - PTC duties for Gloas+ epochs ActiveValidators uint64 TotalBalance phase0.Gwei ActiveBalance phase0.Gwei @@ -265,7 +266,9 @@ func (es *EpochStats) parsePackedSSZ(chainState *consensus.ChainState, ssz []byt proposerDuties = append(proposerDuties, proposerIndex) } - values.ProposerDuties = proposerDuties + if len(values.ProposerDuties) == 0 { + values.ProposerDuties = proposerDuties + } if beaconState.RandaoMix != nil { values.RandaoMix = *beaconState.RandaoMix } @@ -273,6 +276,17 @@ func (es *EpochStats) parsePackedSSZ(chainState *consensus.ChainState, ssz []byt // compute committees attesterDuties, _ := duties.GetAttesterDuties(chainState.GetSpecs(), beaconState, es.epoch) values.AttesterDuties = attesterDuties + + // compute PTC duties (Gloas+ only) + if chainState.IsEip7732Enabled(es.epoch) && attesterDuties != nil { + ptcDuties := make([][]duties.ActiveIndiceIndex, chainState.GetSpecs().SlotsPerEpoch) + 
for slotIndex := uint64(0); slotIndex < chainState.GetSpecs().SlotsPerEpoch; slotIndex++ { + slot := chainState.EpochToSlot(es.epoch) + phase0.Slot(slotIndex) + ptc, _ := duties.GetPtcDuties(chainState.GetSpecs(), beaconState, attesterDuties[slotIndex], slot) + ptcDuties[slotIndex] = ptc + } + values.PtcDuties = ptcDuties + } } return values, nil @@ -291,6 +305,7 @@ func (es *EpochStats) pruneValues() { ProposerDuties: es.values.ProposerDuties, AttesterDuties: nil, // prune SyncCommitteeDuties: es.values.SyncCommitteeDuties, + PtcDuties: nil, // prune - only needed for recent epochs ActiveValidators: es.values.ActiveValidators, TotalBalance: es.values.TotalBalance, ActiveBalance: es.values.ActiveBalance, @@ -322,7 +337,7 @@ func (es *EpochStats) loadValuesFromDb(ctx context.Context, chainState *consensu } // processState processes the epoch state and computes proposer and attester duties. -func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Validator) { +func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Validator, loadDuration time.Duration) { if es.dependentState == nil || es.dependentState.loadingStatus != 2 { return } @@ -424,7 +439,7 @@ func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Vali offset = slotsPerEpoch } - values.ProposerDuties = dependentState.proposerLookahead[offset : offset+slotsPerEpoch] + values.ProposerDuties = dependentState.proposerLookahead[offset:] } else { proposerDuties := []phase0.ValidatorIndex{} for slot := chainState.EpochToSlot(es.epoch); slot < chainState.EpochToSlot(es.epoch+1); slot++ { @@ -450,6 +465,20 @@ func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Vali } values.AttesterDuties = attesterDuties + // compute PTC duties (Gloas+ only) + if chainState.IsEip7732Enabled(es.epoch) && attesterDuties != nil { + ptcDuties := make([][]duties.ActiveIndiceIndex, chainState.GetSpecs().SlotsPerEpoch) + for slotIndex := uint64(0); 
slotIndex < chainState.GetSpecs().SlotsPerEpoch; slotIndex++ { + slot := chainState.EpochToSlot(es.epoch) + phase0.Slot(slotIndex) + ptc, ptcErr := duties.GetPtcDuties(chainState.GetSpecs(), beaconState, attesterDuties[slotIndex], slot) + if ptcErr != nil { + indexer.logger.Warnf("failed computing PTC duties for slot %v: %v", slot, ptcErr) + } + ptcDuties[slotIndex] = ptc + } + values.PtcDuties = ptcDuties + } + if beaconState.RandaoMix != nil { values.RandaoMix = *beaconState.RandaoMix values.NextRandaoMix = *beaconState.NextRandaoMix @@ -475,12 +504,13 @@ func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Vali es.isInDb = true indexer.logger.Infof( - "processed epoch %v stats (root: %v / state: %v, validators: %v/%v, %v ms), %v bytes", + "processed epoch %v stats (root: %v / state: %v, validators: %v/%v, load: %v ms, process: %v ms), %v bytes", es.epoch, es.dependentRoot.String(), dependentState.stateRoot.String(), values.ActiveValidators, len(validatorSet), + loadDuration.Milliseconds(), time.Since(t1).Milliseconds(), len(packedSsz), ) @@ -555,14 +585,20 @@ func (es *EpochStats) precomputeFromParentState(indexer *Indexer, parentState *E // compute proposers proposerDuties := []phase0.ValidatorIndex{} - for slot := chainState.EpochToSlot(es.epoch); slot < chainState.EpochToSlot(es.epoch+1); slot++ { - proposer, err := duties.GetProposerIndex(chainState.GetSpecs(), beaconState, slot) - proposerIndex := phase0.ValidatorIndex(math.MaxInt64) - if err == nil { - proposerIndex = values.ActiveIndices[proposer] - } - proposerDuties = append(proposerDuties, proposerIndex) + specs := chainState.GetSpecs() + if uint64(len(parentState.dependentState.proposerLookahead)) > specs.SlotsPerEpoch { + proposerDuties = parentState.dependentState.proposerLookahead[specs.SlotsPerEpoch:] + } else { + for slot := chainState.EpochToSlot(es.epoch); slot < chainState.EpochToSlot(es.epoch+1); slot++ { + proposer, err := duties.GetProposerIndex(chainState.GetSpecs(), 
beaconState, slot) + proposerIndex := phase0.ValidatorIndex(math.MaxInt64) + if err == nil { + proposerIndex = values.ActiveIndices[proposer] + } + + proposerDuties = append(proposerDuties, proposerIndex) + } } values.ProposerDuties = proposerDuties @@ -571,6 +607,17 @@ func (es *EpochStats) precomputeFromParentState(indexer *Indexer, parentState *E attesterDuties, _ := duties.GetAttesterDuties(chainState.GetSpecs(), beaconState, es.epoch) values.AttesterDuties = attesterDuties + // compute PTC duties (Gloas+ only) + if chainState.IsEip7732Enabled(es.epoch) && attesterDuties != nil { + ptcDuties := make([][]duties.ActiveIndiceIndex, chainState.GetSpecs().SlotsPerEpoch) + for slotIndex := uint64(0); slotIndex < chainState.GetSpecs().SlotsPerEpoch; slotIndex++ { + slot := chainState.EpochToSlot(es.epoch) + phase0.Slot(slotIndex) + ptc, _ := duties.GetPtcDuties(chainState.GetSpecs(), beaconState, attesterDuties[slotIndex], slot) + ptcDuties[slotIndex] = ptc + } + values.PtcDuties = ptcDuties + } + es.precalcValues = values indexer.logger.Infof( diff --git a/indexer/beacon/finalization.go b/indexer/beacon/finalization.go index 2bb70ba17..dc210993f 100644 --- a/indexer/beacon/finalization.go +++ b/indexer/beacon/finalization.go @@ -9,6 +9,7 @@ import ( v1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/blockdb" "github.com/ethpandaops/dora/db" @@ -149,6 +150,15 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R if block.block == nil { return true, fmt.Errorf("missing block body for canonical block %v (%v)", block.Slot, block.Root.String()) } + + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + if _, err := block.EnsureExecutionPayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) { + return LoadExecutionPayload(client.getContext(), 
client, block.Root) + }); err != nil { + client.logger.Warnf("failed loading finalized execution payload %v (%v): %v", block.Slot, block.Root.String(), err) + } + } + canonicalBlocks = append(canonicalBlocks, block) } else { if block.block == nil { @@ -206,6 +216,10 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R } } + if firstBlock.Slot == 0 { + dependentRoot = phase0.Root{} + } + if !isValid { return false, fmt.Errorf("first canonical block %v (%v) is not the first block of epoch %v", firstBlock.Slot, firstBlock.Root.String(), epoch) } @@ -268,7 +282,7 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R // if the state is not yet loaded, we set it to high priority and wait for it to be loaded if !epochStats.ready { if epochStats.dependentState == nil { - indexer.epochCache.addEpochStateRequest(epochStats) + indexer.epochCache.ensureEpochDependentState(epochStats, canonicalBlocks[0].Root) } if epochStats.dependentState != nil && epochStats.dependentState.loadingStatus != 2 && epochStats.dependentState.retryCount < 10 { indexer.logger.Infof("epoch %d state (%v) not yet loaded, waiting for state to be loaded", epoch, dependentRoot.String()) @@ -314,6 +328,36 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R finalizedForkIds[block.GetForkId()] = true } + // Determine payload status for canonical blocks (ePBS only) + // A payload is orphaned if the next canonical block doesn't build on it + allCanonicalBlocks := append(canonicalBlocks, nextEpochCanonicalBlocks...) 
+ for i, block := range canonicalBlocks { + if !chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + continue + } + + blockIndex := block.GetBlockIndex(indexer.ctx) + if blockIndex == nil || blockIndex.ExecutionNumber == 0 { + continue // no execution payload + } + + // Find the next canonical block + var nextBlock *Block + if i+1 < len(allCanonicalBlocks) { + nextBlock = allCanonicalBlocks[i+1] + } + + if nextBlock != nil { + nextBlockIndex := nextBlock.GetBlockIndex(indexer.ctx) + if nextBlockIndex != nil { + // Check if next block builds on this block's payload + if !bytes.Equal(nextBlockIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) { + block.isPayloadOrphaned = true + } + } + } + } + dependentGroups := map[phase0.Root][]*Block{} for _, block := range orphanedBlocks { var dependentRoot phase0.Root @@ -383,6 +427,36 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R } } + // Determine payload status for orphaned chain blocks (ePBS only) + // A payload is orphaned if the next block in the chain doesn't build on it + allChainBlocks := append(chain, nextBlocks...) 
+ for i, block := range chain { + if !chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + continue + } + + blockIndex := block.GetBlockIndex(indexer.ctx) + if blockIndex == nil || blockIndex.ExecutionNumber == 0 { + continue // no execution payload + } + + // Find the next block in this orphaned chain + var nextBlock *Block + if i+1 < len(allChainBlocks) { + nextBlock = allChainBlocks[i+1] + } + + if nextBlock != nil { + nextBlockIndex := nextBlock.GetBlockIndex(indexer.ctx) + if nextBlockIndex != nil { + // Check if next block builds on this block's payload + if !bytes.Equal(nextBlockIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) { + block.isPayloadOrphaned = true + } + } + } + } + // compute votes for canonical blocks votingBlocks := make([]*Block, len(chain)+len(nextBlocks)) copy(votingBlocks, chain) @@ -531,6 +605,7 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R // update validator cache if len(canonicalBlocks) > 0 { indexer.validatorCache.setFinalizedEpoch(epoch, canonicalBlocks[len(canonicalBlocks)-1].Root) + indexer.builderCache.setFinalizedEpoch(epoch, canonicalBlocks[len(canonicalBlocks)-1].Root) } // clean fork cache diff --git a/indexer/beacon/indexer.go b/indexer/beacon/indexer.go index b49bc187e..015551057 100644 --- a/indexer/beacon/indexer.go +++ b/indexer/beacon/indexer.go @@ -47,6 +47,8 @@ type Indexer struct { pubkeyCache *pubkeyCache validatorCache *validatorCache validatorActivity *validatorActivityCache + blockBidCache *blockBidCache + builderCache *builderCache // indexer state clients []*Client @@ -118,6 +120,8 @@ func NewIndexer(ctx context.Context, logger logrus.FieldLogger, consensusPool *c indexer.pubkeyCache = newPubkeyCache(indexer, utils.Config.Indexer.PubkeyCachePath) indexer.validatorCache = newValidatorCache(indexer) indexer.validatorActivity = newValidatorActivityCache(indexer) + indexer.blockBidCache = newBlockBidCache(indexer) + indexer.builderCache = 
newBuilderCache(indexer) indexer.dbWriter = newDbWriter(indexer) badChainRoots := utils.Config.Indexer.BadChainRoots @@ -275,6 +279,14 @@ func (indexer *Indexer) StartIndexer() { indexer.logger.Infof("restored %v validators from DB (%.3f sec)", validatorCount, time.Since(t1).Seconds()) } + // restore finalized builder set from db + t1 = time.Now() + if builderCount, err := indexer.builderCache.prepopulateFromDB(); err != nil { + indexer.logger.WithError(err).Errorf("failed loading builder set") + } else if builderCount > 0 { + indexer.logger.Infof("restored %v builders from DB (%.3f sec)", builderCount, time.Since(t1).Seconds()) + } + // restore unfinalized epoch stats from db restoredEpochStats := 0 t1 = time.Now() @@ -292,7 +304,7 @@ func (indexer *Indexer) StartIndexer() { processingWaitGroup.Done() }() - epochStats := indexer.epochCache.createOrGetEpochStats(phase0.Epoch(dbDuty.Epoch), phase0.Root(dbDuty.DependentRoot), false) + epochStats := indexer.epochCache.createOrGetEpochStats(phase0.Epoch(dbDuty.Epoch), phase0.Root(dbDuty.DependentRoot)) pruneStats := dbDuty.Epoch < uint64(indexer.lastPrunedEpoch) err := epochStats.restoreFromDb(dbDuty, chainState, !pruneStats) @@ -340,6 +352,7 @@ func (indexer *Indexer) StartIndexer() { // restore unfinalized blocks from db restoredBlockCount := 0 restoredBodyCount := 0 + restoredPayloadCount := 0 t1 = time.Now() err = db.StreamUnfinalizedBlocks(indexer.ctx, uint64(finalizedSlot), func(dbBlock *dbtypes.UnfinalizedBlock) { block, _ := indexer.blockCache.createOrGetBlock(phase0.Root(dbBlock.Root), phase0.Slot(dbBlock.Slot)) @@ -377,10 +390,23 @@ func (indexer *Indexer) StartIndexer() { block.SetBlock(blockBody) restoredBodyCount++ } else { - block.setBlockIndex(blockBody) + block.setBlockIndex(blockBody, nil) block.isInFinalizedDb = true } + if len(dbBlock.PayloadSSZ) > 0 { + blockPayload, err := UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(indexer.dynSsz, dbBlock.PayloadVer, dbBlock.PayloadSSZ) + if err != nil { + 
indexer.logger.Warnf("could not restore unfinalized block payload %v [%x] from db: %v", dbBlock.Slot, dbBlock.Root, err) + } else if block.processingStatus == 0 { + block.SetExecutionPayload(blockPayload) + restoredPayloadCount++ + } else { + block.setBlockIndex(blockBody, blockPayload) + block.hasExecutionPayload = true + } + } + indexer.blockCache.addBlockToExecBlockMap(block) blockFork := indexer.forkCache.getForkById(block.forkId) @@ -404,6 +430,9 @@ func (indexer *Indexer) StartIndexer() { indexer.logger.Infof("restored %v unfinalized blocks from DB (%v with bodies, %.3f sec)", restoredBlockCount, restoredBodyCount, time.Since(t1).Seconds()) } + // restore block bids from db + indexer.blockBidCache.loadFromDB(chainState.CurrentSlot()) + // start indexing for all clients for _, client := range indexer.clients { client.startIndexing() @@ -424,7 +453,8 @@ func (indexer *Indexer) StartIndexer() { if len(genesisBlock) == 0 { indexer.logger.Warnf("genesis block not found in cache") } else { - indexer.epochCache.createOrGetEpochStats(0, genesisBlock[0].Root, true) + epochStats := indexer.epochCache.createOrGetEpochStats(0, genesisBlock[0].Root) + indexer.epochCache.ensureEpochDependentState(epochStats, genesisBlock[0].Root) } } @@ -438,6 +468,11 @@ func (indexer *Indexer) StartIndexer() { } func (indexer *Indexer) StopIndexer() { + // flush block bids to db before shutdown + if err := indexer.blockBidCache.flushAll(); err != nil { + indexer.logger.WithError(err).Errorf("error flushing block bids on shutdown") + } + indexer.pubkeyCache.Close() } @@ -489,6 +524,11 @@ func (indexer *Indexer) runIndexerLoop() { slotIndex := chainState.SlotToSlotIndex(phase0.Slot(slotEvent.Number())) slotProgress := uint8(100 / chainState.GetSpecs().SlotsPerEpoch * uint64(slotIndex)) + // flush old block bids if needed + if err := indexer.blockBidCache.checkAndFlush(); err != nil { + indexer.logger.WithError(err).Errorf("failed flushing block bids") + } + // precalc next canonical duties 
on epoch start if epoch >= indexer.lastPrecalcRunEpoch { err := indexer.precalcNextEpochStats(epoch) diff --git a/indexer/beacon/indexer_getter.go b/indexer/beacon/indexer_getter.go index 1898f8d7b..c1d85d842 100644 --- a/indexer/beacon/indexer_getter.go +++ b/indexer/beacon/indexer_getter.go @@ -9,9 +9,11 @@ import ( v1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/electra" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/clients/consensus" "github.com/ethpandaops/dora/db" + "github.com/ethpandaops/dora/dbtypes" dynssz "github.com/pk910/dynamic-ssz" ) @@ -222,6 +224,14 @@ func (indexer *Indexer) GetOrphanedBlockByRoot(blockRoot phase0.Root) (*Block, e block.SetHeader(header) block.SetBlock(blockBody) + if len(orphanedBlock.PayloadSSZ) > 0 { + payload, err := UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(indexer.dynSsz, orphanedBlock.PayloadVer, orphanedBlock.PayloadSSZ) + if err != nil { + return nil, fmt.Errorf("could not restore orphaned block payload %v [%x] from db: %v", header.Message.Slot, orphanedBlock.Root, err) + } + block.SetExecutionPayload(payload) + } + return block, nil } @@ -499,3 +509,71 @@ func (indexer *Indexer) GetFullValidatorByIndex(validatorIndex phase0.ValidatorI return validatorData } + +// GetBlockBids returns the execution payload bids for a given parent block root. +// It first checks the in-memory cache, then falls back to the database. +func (indexer *Indexer) GetBlockBids(parentBlockRoot phase0.Root) []*dbtypes.BlockBid { + // First check the in-memory cache + bids := indexer.blockBidCache.GetBidsForBlockRoot(parentBlockRoot) + if len(bids) > 0 { + return bids + } + + // Fall back to database + return db.GetBidsForBlockRoot(indexer.ctx, parentBlockRoot[:]) +} + +// StreamActiveBuilderDataForRoot streams the available builder set data for a given blockRoot. 
+func (indexer *Indexer) StreamActiveBuilderDataForRoot(blockRoot phase0.Root, activeOnly bool, epoch *phase0.Epoch, cb BuilderSetStreamer) error { + return indexer.builderCache.streamBuilderSetForRoot(blockRoot, activeOnly, epoch, cb) +} + +// GetBuilderSetSize returns the size of the builder set cache. +func (indexer *Indexer) GetBuilderSetSize() uint64 { + return indexer.builderCache.getBuilderSetSize() +} + +// GetBuilderByIndex returns the builder by index for the canonical head. +func (indexer *Indexer) GetBuilderByIndex(index gloas.BuilderIndex, overrideForkId *ForkKey) *gloas.Builder { + return indexer.builderCache.getBuilderByIndex(index, overrideForkId) +} + +// GetRecentBuilderBalances returns the most recent builder balances for the given fork. +func (indexer *Indexer) GetRecentBuilderBalances(overrideForkId *ForkKey) []phase0.Gwei { + chainState := indexer.consensusPool.GetChainState() + + canonicalHead := indexer.GetCanonicalHead(overrideForkId) + if canonicalHead == nil { + return nil + } + + headEpoch := chainState.EpochOfSlot(canonicalHead.Slot) + + var epochStats *EpochStats + for { + cEpoch := chainState.EpochOfSlot(canonicalHead.Slot) + if headEpoch-cEpoch > 2 { + return nil + } + + dependentBlock := indexer.blockCache.getDependentBlock(chainState, canonicalHead, nil) + if dependentBlock == nil { + return nil + } + canonicalHead = dependentBlock + + stats := indexer.epochCache.getEpochStats(cEpoch, dependentBlock.Root) + if cEpoch > 0 && (stats == nil || stats.dependentState == nil || stats.dependentState.loadingStatus != 2) { + continue // retry previous state + } + + epochStats = stats + break + } + + if epochStats == nil || epochStats.dependentState == nil { + return nil + } + + return epochStats.dependentState.builderBalances +} diff --git a/indexer/beacon/precalc.go b/indexer/beacon/precalc.go index 078370eda..ada710034 100644 --- a/indexer/beacon/precalc.go +++ b/indexer/beacon/precalc.go @@ -32,7 +32,7 @@ func (indexer *Indexer) 
precalcNextEpochStats(epoch phase0.Epoch) error { } // precompute epoch stats for the epoch if we have the parent epoch stats ready - epochStats := indexer.epochCache.createOrGetEpochStats(epoch, dependentBlock.Root, false) + epochStats := indexer.epochCache.createOrGetEpochStats(epoch, dependentBlock.Root) if !epochStats.ready { var parentDependentBlock *Block if chainState.EpochOfSlot(dependentBlock.Slot) == epoch-1 { diff --git a/indexer/beacon/pruning.go b/indexer/beacon/pruning.go index ff64840ad..92a02a010 100644 --- a/indexer/beacon/pruning.go +++ b/indexer/beacon/pruning.go @@ -117,7 +117,7 @@ func (indexer *Indexer) processEpochPruning(pruneEpoch phase0.Epoch) (uint64, ui // if the state is not yet loaded, we set it to high priority and wait for it to be loaded if epochStats != nil && !epochStats.ready { if epochStats.dependentState == nil { - indexer.epochCache.addEpochStateRequest(epochStats) + indexer.epochCache.ensureEpochDependentState(epochStats, blocks[0].Root) } if epochStats.dependentState != nil && epochStats.dependentState.loadingStatus != 2 && epochStats.dependentState.retryCount < 10 { indexer.logger.Infof("epoch %d state (%v) not yet loaded, waiting for state to be loaded", pruneEpoch, dependentRoot.String()) @@ -169,6 +169,36 @@ func (indexer *Indexer) processEpochPruning(pruneEpoch phase0.Epoch) (uint64, ui } } + // Determine payload status for chain blocks (ePBS only) + // A payload is orphaned if the next block in the chain doesn't build on it + allChainBlocks := append(chain, nextBlocks...) 
+ for i, block := range chain { + if !chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + continue + } + + blockIndex := block.GetBlockIndex(indexer.ctx) + if blockIndex == nil || blockIndex.ExecutionNumber == 0 { + continue // no execution payload + } + + // Find the next block in this chain + var nextBlock *Block + if i+1 < len(allChainBlocks) { + nextBlock = allChainBlocks[i+1] + } + + if nextBlock != nil { + nextBlockIndex := nextBlock.GetBlockIndex(indexer.ctx) + if nextBlockIndex != nil { + // Check if next block builds on this block's payload + if !bytes.Equal(nextBlockIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) { + block.isPayloadOrphaned = true + } + } + } + } + // compute votes for canonical blocks votingBlocks := make([]*Block, len(chain)+len(nextBlocks)) copy(votingBlocks, chain) @@ -257,8 +287,9 @@ func (indexer *Indexer) processEpochPruning(pruneEpoch phase0.Epoch) (uint64, ui for _, block := range pruningBlocks { block.isInFinalizedDb = true block.processingStatus = dbtypes.UnfinalizedBlockStatusPruned - block.setBlockIndex(block.block) + block.setBlockIndex(block.block, block.executionPayload) block.block = nil + block.executionPayload = nil block.blockResults = nil } diff --git a/indexer/beacon/requests.go b/indexer/beacon/requests.go index df6ec6fb3..c5058014f 100644 --- a/indexer/beacon/requests.go +++ b/indexer/beacon/requests.go @@ -6,6 +6,7 @@ import ( "time" "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" ) @@ -18,6 +19,9 @@ const beaconBodyRequestTimeout time.Duration = 30 * time.Second // BeaconStateRequestTimeout is the timeout duration for beacon state requests. const beaconStateRequestTimeout time.Duration = 600 * time.Second +// ExecutionPayloadRequestTimeout is the timeout duration for execution payload requests. 
+const executionPayloadRequestTimeout time.Duration = 30 * time.Second + const beaconStateRetryCount = 10 // LoadBeaconHeader loads the block header from the client. @@ -68,10 +72,29 @@ func LoadBeaconState(ctx context.Context, client *Client, root phase0.Root) (*sp ctx, cancel := context.WithTimeout(ctx, beaconStateRequestTimeout) defer cancel() - resState, err := client.client.GetRPCClient().GetState(ctx, fmt.Sprintf("0x%x", root[:])) + stateRef := fmt.Sprintf("0x%x", root[:]) + nullRoot := phase0.Root{} + if root == nullRoot { + stateRef = "genesis" + } + + resState, err := client.client.GetRPCClient().GetState(ctx, stateRef) if err != nil { return nil, err } return resState, nil } + +// LoadExecutionPayload loads the execution payload from the client. +func LoadExecutionPayload(ctx context.Context, client *Client, root phase0.Root) (*gloas.SignedExecutionPayloadEnvelope, error) { + ctx, cancel := context.WithTimeout(ctx, executionPayloadRequestTimeout) + defer cancel() + + payload, err := client.client.GetRPCClient().GetExecutionPayloadByBlockroot(ctx, root) + if err != nil { + return nil, err + } + + return payload, nil +} diff --git a/indexer/beacon/synchronizer.go b/indexer/beacon/synchronizer.go index 436ce6fe3..76ca8f687 100644 --- a/indexer/beacon/synchronizer.go +++ b/indexer/beacon/synchronizer.go @@ -10,6 +10,7 @@ import ( "time" "github.com/attestantio/go-eth2-client/spec" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/blockdb" "github.com/ethpandaops/dora/clients/consensus" @@ -264,11 +265,17 @@ func (s *synchronizer) loadBlockHeader(client *Client, slot phase0.Slot) (*phase } func (s *synchronizer) loadBlockBody(client *Client, root phase0.Root) (*spec.VersionedSignedBeaconBlock, error) { - ctx, cancel := context.WithTimeout(s.syncCtx, beaconHeaderRequestTimeout) + ctx, cancel := context.WithTimeout(s.syncCtx, beaconBodyRequestTimeout) defer cancel() return 
LoadBeaconBlock(ctx, client, root) } +func (s *synchronizer) loadBlockPayload(client *Client, root phase0.Root) (*gloas.SignedExecutionPayloadEnvelope, error) { + ctx, cancel := context.WithTimeout(s.syncCtx, executionPayloadRequestTimeout) + defer cancel() + return LoadExecutionPayload(ctx, client, root) +} + func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry bool) (bool, error) { if !utils.Config.Indexer.ResyncForceUpdate && db.IsEpochSynchronized(s.syncCtx, uint64(syncEpoch)) { return true, nil @@ -327,6 +334,17 @@ func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry block.SetBlock(blockBody) } + if slot > 0 && chainState.IsEip7732Enabled(chainState.EpochOfSlot(slot)) { + blockPayload, err := s.loadBlockPayload(client, phase0.Root(blockRoot)) + if err != nil && !lastTry { + return false, fmt.Errorf("error fetching slot %v execution payload: %v", slot, err) + } + + if blockPayload != nil { + block.SetExecutionPayload(blockPayload) + } + } + s.cachedBlocks[slot] = block } @@ -365,7 +383,9 @@ func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry } epochState := newEpochState(dependentRoot) + t1 := time.Now() state, err := epochState.loadState(s.syncCtx, client, nil) + loadDuration := time.Since(t1) if (err != nil || epochState.loadingStatus != 2) && !lastTry { return false, fmt.Errorf("error fetching epoch %v state: %v", syncEpoch, err) } @@ -385,7 +405,7 @@ func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry if epochState != nil && epochState.loadingStatus == 2 { epochStats = newEpochStats(syncEpoch, dependentRoot) epochStats.dependentState = epochState - epochStats.processState(s.indexer, validatorSet) + epochStats.processState(s.indexer, validatorSet, loadDuration) epochStatsValues = epochStats.GetValues(false) } @@ -410,6 +430,36 @@ func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry sim.validatorSet = validatorSet } + 
// Determine payload status for canonical blocks (ePBS only) + // A payload is orphaned if the next canonical block doesn't build on it + allCanonicalBlocks := append(canonicalBlocks, nextEpochCanonicalBlocks...) + for i, block := range canonicalBlocks { + if !chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + continue + } + + blockIndex := block.GetBlockIndex(s.indexer.ctx) + if blockIndex == nil || blockIndex.ExecutionNumber == 0 { + continue // no execution payload + } + + // Find the next canonical block + var nextBlock *Block + if i+1 < len(allCanonicalBlocks) { + nextBlock = allCanonicalBlocks[i+1] + } + + if nextBlock != nil { + nextBlockIndex := nextBlock.GetBlockIndex(s.indexer.ctx) + if nextBlockIndex != nil { + // Check if next block builds on this block's payload + if !bytes.Equal(nextBlockIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) { + block.isPayloadOrphaned = true + } + } + } + } + // save blocks err = db.RunDBTransaction(func(tx *sqlx.Tx) error { err = s.indexer.dbWriter.persistEpochData(tx, syncEpoch, canonicalBlocks, epochStats, epochVotes, sim) diff --git a/indexer/beacon/writedb.go b/indexer/beacon/writedb.go index 8f7f5f581..a62932d10 100644 --- a/indexer/beacon/writedb.go +++ b/indexer/beacon/writedb.go @@ -79,6 +79,11 @@ func (dbw *dbWriter) persistBlockData(tx *sqlx.Tx, block *Block, epochStats *Epo dbBlock.Status = dbtypes.Orphaned } + // Apply payload orphaned status from block flag (set during finalization/sync) + if block.isPayloadOrphaned { + dbBlock.PayloadStatus = dbtypes.PayloadStatusOrphaned + } + err := db.InsertSlot(dbw.indexer.ctx, tx, dbBlock) if err != nil { return nil, fmt.Errorf("error inserting slot: %v", err) @@ -245,6 +250,8 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override epochStatsValues = epochStats.GetValues(true) } + chainState := dbw.indexer.consensusPool.GetChainState() + graffiti, _ := blockBody.Graffiti() attestations, _ := blockBody.Attestations() 
deposits, _ := blockBody.Deposits() @@ -253,28 +260,56 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override proposerSlashings, _ := blockBody.ProposerSlashings() blsToExecChanges, _ := blockBody.BLSToExecutionChanges() syncAggregate, _ := blockBody.SyncAggregate() + executionBlockHash, _ := blockBody.ExecutionBlockHash() blobKzgCommitments, _ := blockBody.BlobKZGCommitments() - var executionExtraData []byte var executionBlockNumber uint64 - var executionBlockHash phase0.Hash32 + var executionBlockParentHash []byte + var executionExtraData []byte var executionTransactions []bellatrix.Transaction var executionWithdrawals []*capella.Withdrawal - - executionPayload, _ := blockBody.ExecutionPayload() - if executionPayload != nil { - executionExtraData, _ = executionPayload.ExtraData() - executionBlockHash, _ = executionPayload.BlockHash() - executionBlockNumber, _ = executionPayload.BlockNumber() - executionTransactions, _ = executionPayload.Transactions() - executionWithdrawals, _ = executionPayload.Withdrawals() - } - var depositRequests []*electra.DepositRequest + var payloadStatus dbtypes.PayloadStatus + + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + blockPayload := block.GetExecutionPayload(dbw.indexer.ctx) + if blockPayload != nil { + executionBlockNumber = blockPayload.Message.Payload.BlockNumber + executionBlockParentHash = blockPayload.Message.Payload.ParentHash[:] + executionExtraData = blockPayload.Message.Payload.ExtraData + executionTransactions = blockPayload.Message.Payload.Transactions + executionWithdrawals = blockPayload.Message.Payload.Withdrawals + depositRequests = blockPayload.Message.ExecutionRequests.Deposits + payloadStatus = dbtypes.PayloadStatusCanonical + } else { + payloadStatus = dbtypes.PayloadStatusMissing + } + } else { + payloadStatus = dbtypes.PayloadStatusCanonical + executionBlockNumber, _ = blockBody.ExecutionBlockNumber() + executionPayload, _ := blockBody.ExecutionPayload() 
+ if executionPayload != nil { + executionExtraData, _ = executionPayload.ExtraData() + executionTransactions, _ = executionPayload.Transactions() + executionWithdrawals, _ = executionPayload.Withdrawals() + if parentHash, err := executionPayload.ParentHash(); err == nil { + executionBlockParentHash = parentHash[:] + } + } + executionRequests, _ := blockBody.ExecutionRequests() + if executionRequests != nil { + depositRequests = executionRequests.Deposits + } + } - executionRequests, _ := blockBody.ExecutionRequests() - if executionRequests != nil { - depositRequests = executionRequests.Deposits + // Get builder index from block, default to -1 (self-built/MaxUint64) + var builderIndexInt64 int64 = -1 + if blockIndex := block.GetBlockIndex(dbw.indexer.ctx); blockIndex != nil { + if blockIndex.BuilderIndex == math.MaxUint64 { + builderIndexInt64 = -1 + } else { + builderIndexInt64 = int64(blockIndex.BuilderIndex) + } } dbBlock := dbtypes.Slot{ @@ -295,7 +330,9 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override BLSChangeCount: uint64(len(blsToExecChanges)), BlobCount: uint64(len(blobKzgCommitments)), RecvDelay: block.recvDelay, + PayloadStatus: payloadStatus, BlockUid: block.BlockUID, + BuilderIndex: builderIndexInt64, } blockSize, err := getBlockSize(block.dynSsz, blockBody) @@ -331,6 +368,7 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override dbBlock.EthTransactionCount = uint64(len(executionTransactions)) dbBlock.EthBlockNumber = &executionBlockNumber dbBlock.EthBlockHash = executionBlockHash[:] + dbBlock.EthBlockParentHash = executionBlockParentHash dbBlock.EthBlockExtra = executionExtraData dbBlock.EthBlockExtraText = utils.GraffitiToString(executionExtraData[:]) dbBlock.WithdrawCount = uint64(len(executionWithdrawals)) @@ -409,6 +447,15 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override dbBlock.EthBaseFee = utils.GetBaseFeeAsUint64(payload.BaseFeePerGas) 
dbBlock.EthFeeRecipient = payload.FeeRecipient[:] } + case spec.DataVersionGloas: + blockPayload := block.GetExecutionPayload(dbw.indexer.ctx) + if blockPayload != nil { + payload := blockPayload.Message.Payload + dbBlock.EthGasUsed = payload.GasUsed + dbBlock.EthGasLimit = payload.GasLimit + dbBlock.EthBaseFee = utils.GetBaseFeeAsUint64(payload.BaseFeePerGas) + dbBlock.EthFeeRecipient = payload.FeeRecipient[:] + } } } @@ -482,15 +529,27 @@ func (dbw *dbWriter) buildDbEpoch(epoch phase0.Epoch, blocks []*Block, epochStat proposerSlashings, _ := blockBody.ProposerSlashings() blsToExecChanges, _ := blockBody.BLSToExecutionChanges() syncAggregate, _ := blockBody.SyncAggregate() - executionTransactions, _ := blockBody.ExecutionTransactions() - executionWithdrawals, _ := blockBody.Withdrawals() blobKzgCommitments, _ := blockBody.BlobKZGCommitments() + var executionTransactions []bellatrix.Transaction + var executionWithdrawals []*capella.Withdrawal var depositRequests []*electra.DepositRequest - executionRequests, _ := blockBody.ExecutionRequests() - if executionRequests != nil { - depositRequests = executionRequests.Deposits + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + blockPayload := block.GetExecutionPayload(dbw.indexer.ctx) + if blockPayload != nil { + dbEpoch.PayloadCount++ + executionTransactions = blockPayload.Message.Payload.Transactions + executionWithdrawals = blockPayload.Message.Payload.Withdrawals + depositRequests = blockPayload.Message.ExecutionRequests.Deposits + } + } else { + executionTransactions, _ = blockBody.ExecutionTransactions() + executionWithdrawals, _ = blockBody.Withdrawals() + executionRequests, _ := blockBody.ExecutionRequests() + if executionRequests != nil { + depositRequests = executionRequests.Deposits + } } dbEpoch.AttestationCount += uint64(len(attestations)) @@ -564,6 +623,13 @@ func (dbw *dbWriter) buildDbEpoch(epoch phase0.Epoch, blocks []*Block, epochStat dbEpoch.EthGasUsed += payload.GasUsed 
dbEpoch.EthGasLimit += payload.GasLimit } + case spec.DataVersionGloas: + blockPayload := block.GetExecutionPayload(dbw.indexer.ctx) + if blockPayload != nil { + payload := blockPayload.Message.Payload + dbEpoch.EthGasUsed += payload.GasUsed + dbEpoch.EthGasLimit += payload.GasLimit + } } } } @@ -652,14 +718,26 @@ func (dbw *dbWriter) persistBlockDepositRequests(tx *sqlx.Tx, block *Block, orph } func (dbw *dbWriter) buildDbDepositRequests(block *Block, orphaned bool, overrideForkId *ForkKey) []*dbtypes.Deposit { - blockBody := block.GetBlock(dbw.indexer.ctx) - if blockBody == nil { - return nil + chainState := dbw.indexer.consensusPool.GetChainState() + + var requests *electra.ExecutionRequests + + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + payload := block.GetExecutionPayload(dbw.indexer.ctx) + if payload != nil { + requests = payload.Message.ExecutionRequests + } + } else { + blockBody := block.GetBlock(dbw.indexer.ctx) + if blockBody == nil { + return nil + } + + requests, _ = blockBody.ExecutionRequests() } - requests, err := blockBody.ExecutionRequests() - if err != nil { - return nil + if requests == nil { + return []*dbtypes.Deposit{} } deposits := requests.Deposits @@ -839,14 +917,29 @@ func (dbw *dbWriter) persistBlockConsolidationRequests(tx *sqlx.Tx, block *Block } func (dbw *dbWriter) buildDbConsolidationRequests(block *Block, orphaned bool, overrideForkId *ForkKey, sim *stateSimulator) []*dbtypes.ConsolidationRequest { - blockBody := block.GetBlock(dbw.indexer.ctx) - if blockBody == nil { - return nil + chainState := dbw.indexer.consensusPool.GetChainState() + + var requests *electra.ExecutionRequests + var blockNumber uint64 + + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + payload := block.GetExecutionPayload(dbw.indexer.ctx) + if payload != nil { + requests = payload.Message.ExecutionRequests + blockNumber = payload.Message.Payload.BlockNumber + } + } else { + blockBody := 
block.GetBlock(dbw.indexer.ctx) + if blockBody == nil { + return nil + } + + requests, _ = blockBody.ExecutionRequests() + blockNumber, _ = blockBody.ExecutionBlockNumber() } - requests, err := blockBody.ExecutionRequests() - if err != nil { - return nil + if requests == nil { + return []*dbtypes.ConsolidationRequest{} } if sim == nil { @@ -868,8 +961,6 @@ func (dbw *dbWriter) buildDbConsolidationRequests(block *Block, orphaned bool, o blockResults = sim.replayBlockResults(block) } - blockNumber, _ := blockBody.ExecutionBlockNumber() - dbConsolidations := make([]*dbtypes.ConsolidationRequest, len(consolidations)) for idx, consolidation := range consolidations { dbConsolidation := &dbtypes.ConsolidationRequest{ @@ -920,14 +1011,29 @@ func (dbw *dbWriter) persistBlockWithdrawalRequests(tx *sqlx.Tx, block *Block, o } func (dbw *dbWriter) buildDbWithdrawalRequests(block *Block, orphaned bool, overrideForkId *ForkKey, sim *stateSimulator) []*dbtypes.WithdrawalRequest { - blockBody := block.GetBlock(dbw.indexer.ctx) - if blockBody == nil { - return nil + chainState := dbw.indexer.consensusPool.GetChainState() + + var requests *electra.ExecutionRequests + var blockNumber uint64 + + if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) { + payload := block.GetExecutionPayload(dbw.indexer.ctx) + if payload != nil { + requests = payload.Message.ExecutionRequests + blockNumber = payload.Message.Payload.BlockNumber + } + } else { + blockBody := block.GetBlock(dbw.indexer.ctx) + if blockBody == nil { + return nil + } + + requests, _ = blockBody.ExecutionRequests() + blockNumber, _ = blockBody.ExecutionBlockNumber() } - requests, err := blockBody.ExecutionRequests() - if err != nil { - return nil + if requests == nil { + return []*dbtypes.WithdrawalRequest{} } if sim == nil { @@ -949,8 +1055,6 @@ func (dbw *dbWriter) buildDbWithdrawalRequests(block *Block, orphaned bool, over blockResults = sim.replayBlockResults(block) } - blockNumber, _ := 
blockBody.ExecutionBlockNumber() - dbWithdrawalRequests := make([]*dbtypes.WithdrawalRequest, len(withdrawalRequests)) for idx, withdrawalRequest := range withdrawalRequests { dbWithdrawalRequest := &dbtypes.WithdrawalRequest{ diff --git a/services/chainservice.go b/services/chainservice.go index 0c8c99e7b..7f666078f 100644 --- a/services/chainservice.go +++ b/services/chainservice.go @@ -265,6 +265,13 @@ func (cs *ChainService) StartService() error { return fmt.Errorf("failed initializing s3 blockdb: %v", err) } cs.logger.Infof("S3 blockdb initialized at %v", utils.Config.BlockDb.S3.Bucket) + case "tiered": + err := blockdb.InitWithTiered(utils.Config.BlockDb.Tiered, cs.logger) + if err != nil { + return fmt.Errorf("failed initializing tiered blockdb: %v", err) + } + cs.logger.Infof("Tiered blockdb initialized (Pebble cache: %v, S3: %v)", + utils.Config.BlockDb.Tiered.Pebble.Path, utils.Config.BlockDb.Tiered.S3.Bucket) default: cs.logger.Infof("Blockdb disabled") } diff --git a/services/chainservice_blocks.go b/services/chainservice_blocks.go index e063ef81c..de3f52549 100644 --- a/services/chainservice_blocks.go +++ b/services/chainservice_blocks.go @@ -9,9 +9,11 @@ import ( "github.com/attestantio/go-eth2-client/spec" "github.com/attestantio/go-eth2-client/spec/deneb" + "github.com/attestantio/go-eth2-client/spec/gloas" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/ethpandaops/dora/blockdb" + btypes "github.com/ethpandaops/dora/blockdb/types" "github.com/ethpandaops/dora/db" "github.com/ethpandaops/dora/dbtypes" "github.com/ethpandaops/dora/indexer/beacon" @@ -22,6 +24,7 @@ type CombinedBlockResponse struct { Root phase0.Root Header *phase0.SignedBeaconBlockHeader Block *spec.VersionedSignedBeaconBlock + Payload *gloas.SignedExecutionPayloadEnvelope Orphaned bool } @@ -103,6 +106,7 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot Root: blockInfo.Root, Header: blockInfo.GetHeader(), Block: 
blockInfo.GetBlock(ctx), + Payload: blockInfo.GetExecutionPayload(ctx), Orphaned: !bs.beaconIndexer.IsCanonicalBlock(blockInfo, nil), } } @@ -115,6 +119,7 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot Root: blockInfo.Root, Header: blockInfo.GetHeader(), Block: blockInfo.GetBlock(ctx), + Payload: blockInfo.GetExecutionPayload(ctx), Orphaned: true, } } @@ -127,18 +132,34 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot } var block *spec.VersionedSignedBeaconBlock + var payload *gloas.SignedExecutionPayloadEnvelope bodyRetry := 0 for ; bodyRetry < 3; bodyRetry++ { client := clients[bodyRetry%len(clients)] - block, err = beacon.LoadBeaconBlock(ctx, client, blockroot) - if block != nil { - break - } else if err != nil { - log := logrus.WithError(err) - if client != nil { - log = log.WithField("client", client.GetClient().GetName()) + if block == nil { + block, err = beacon.LoadBeaconBlock(ctx, client, blockroot) + if err != nil { + log := logrus.WithError(err) + if client != nil { + log = log.WithField("client", client.GetClient().GetName()) + } + log.Warnf("Error loading block body for root 0x%x", blockroot) } - log.Warnf("Error loading block body for root 0x%x", blockroot) + } + + if block != nil && block.Version >= spec.DataVersionGloas { + payload, err = beacon.LoadExecutionPayload(ctx, client, blockroot) + if payload != nil { + break + } else if err != nil { + log := logrus.WithError(err) + if client != nil { + log = log.WithField("client", client.GetClient().GetName()) + } + log.Warnf("Error loading block payload for root 0x%x", blockroot) + } + } else if block != nil { + break } } if err == nil && block != nil { @@ -146,6 +167,7 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot Root: blockroot, Header: header, Block: block, + Payload: payload, Orphaned: false, } } @@ -153,16 +175,24 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot // try 
loading from block db if result == nil && header != nil && blockdb.GlobalBlockDb != nil { - blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(header.Message.Slot), blockroot[:], func(version uint64, block []byte) (interface{}, error) { - return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block) - }) - if err == nil && blockData != nil { - result = &CombinedBlockResponse{ + blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(header.Message.Slot), blockroot[:], + btypes.BlockDataFlagBody|btypes.BlockDataFlagPayload, + func(version uint64, block []byte) (any, error) { + return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block) + }, func(version uint64, payload []byte) (any, error) { + return beacon.UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(bs.beaconIndexer.GetDynSSZ(), version, payload) + }) + if err == nil && blockData != nil && blockData.Body != nil { + resp := &CombinedBlockResponse{ Root: blockroot, Header: header, Block: blockData.Body.(*spec.VersionedSignedBeaconBlock), Orphaned: false, } + if blockData.Payload != nil { + resp.Payload = blockData.Payload.(*gloas.SignedExecutionPayloadEnvelope) + } + result = resp } } @@ -232,6 +262,7 @@ func (bs *ChainService) GetSlotDetailsBySlot(ctx context.Context, slot phase0.Sl Root: cachedBlock.Root, Header: blockHeader, Block: blockBody, + Payload: cachedBlock.GetExecutionPayload(ctx), Orphaned: isOrphaned, } } @@ -248,25 +279,40 @@ func (bs *ChainService) GetSlotDetailsBySlot(ctx context.Context, slot phase0.Sl var err error var block *spec.VersionedSignedBeaconBlock + var payload *gloas.SignedExecutionPayloadEnvelope bodyRetry := 0 for ; bodyRetry < 3; bodyRetry++ { client := clients[bodyRetry%len(clients)] block, err = beacon.LoadBeaconBlock(ctx, client, blockRoot) - if block != nil { - break - } else if err != nil { + if err != nil { log := logrus.WithError(err) if client != nil { log = 
log.WithField("client", client.GetClient().GetName()) } log.Warnf("Error loading block body for slot %v", slot) } + + if block != nil && block.Version >= spec.DataVersionGloas { + payload, err = beacon.LoadExecutionPayload(ctx, client, blockRoot) + if payload != nil { + break + } else if err != nil { + log := logrus.WithError(err) + if client != nil { + log = log.WithField("client", client.GetClient().GetName()) + } + log.Warnf("Error loading block payload for root 0x%x", blockRoot) + } + } else if block != nil { + break + } } if err == nil && block != nil { result = &CombinedBlockResponse{ Root: blockRoot, Header: header, Block: block, + Payload: payload, Orphaned: orphaned, } } @@ -274,22 +320,30 @@ func (bs *ChainService) GetSlotDetailsBySlot(ctx context.Context, slot phase0.Sl // try loading from block db if result == nil && header != nil && blockdb.GlobalBlockDb != nil { - blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(slot), blockRoot[:], func(version uint64, block []byte) (interface{}, error) { - return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block) - }) - if err == nil && blockData != nil { + blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(slot), blockRoot[:], + btypes.BlockDataFlagHeader|btypes.BlockDataFlagBody|btypes.BlockDataFlagPayload, + func(version uint64, block []byte) (any, error) { + return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block) + }, func(version uint64, payload []byte) (any, error) { + return beacon.UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(bs.beaconIndexer.GetDynSSZ(), version, payload) + }) + if err == nil && blockData != nil && blockData.Body != nil { header := &phase0.SignedBeaconBlockHeader{} err = header.UnmarshalSSZ(blockData.HeaderData) if err != nil { return nil, err } - result = &CombinedBlockResponse{ + resp := &CombinedBlockResponse{ Root: blockRoot, Header: header, Block: 
blockData.Body.(*spec.VersionedSignedBeaconBlock), Orphaned: false, } + if blockData.Payload != nil { + resp.Payload = blockData.Payload.(*gloas.SignedExecutionPayloadEnvelope) + } + result = resp } } @@ -613,7 +667,8 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes // get blocks from cache // iterate from current slot to finalized slot - lastCanonicalBlock := bs.beaconIndexer.GetCanonicalHead(nil) + canonicalHead := bs.beaconIndexer.GetCanonicalHead(nil) + lastCanonicalBlock := canonicalHead // apply epoch filter to slot range cacheStartSlot := startSlot @@ -820,6 +875,28 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes } } + // filter by builder index + if filter.BuilderIndex != nil { + builderIndex := blockIndex.BuilderIndex + // Convert uint64 to int64 for comparison (-1 means self-built/MaxUint64) + var builderIndexInt64 int64 + if builderIndex == math.MaxUint64 { + builderIndexInt64 = -1 + } else { + builderIndexInt64 = int64(builderIndex) + } + if builderIndexInt64 != *filter.BuilderIndex { + continue + } + } + + // filter by EL block parent hash + if len(filter.EthBlockParentHash) > 0 { + if !bytes.Equal(blockIndex.ExecutionParentHash[:], filter.EthBlockParentHash) { + continue + } + } + // filter by gas used if filter.MinGasUsed != nil || filter.MaxGasUsed != nil { gasUsed := blockIndex.GasUsed @@ -853,6 +930,47 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes } } + // filter by payload status (runtime computation for unfinalized blocks) + // Only applies to gloas/ePBS blocks where payloads are separate from beacon blocks + blockEpoch := chainState.EpochOfSlot(block.Slot) + if filter.WithPayloadOrphaned != 1 && chainState.IsEip7732Enabled(blockEpoch) { + // Compute payload status by checking if any child block in the canonical chain + // builds on this block's execution payload + payloadIsCanonical := false + if blockIndex.ExecutionNumber > 0 { + // Get 
child blocks and check if any canonical child builds on this payload + childBlocks := bs.beaconIndexer.GetBlockByParentRoot(block.Root) + for _, child := range childBlocks { + childIndex := child.GetBlockIndex(ctx) + if childIndex == nil { + continue + } + // Check if child is in the canonical chain (use original head since + // children are at higher slots than the updated lastCanonicalBlock) + if !bs.beaconIndexer.IsCanonicalBlockByHead(child, canonicalHead) { + continue + } + // Check if child builds on this block's execution payload + if bytes.Equal(childIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) { + payloadIsCanonical = true + break + } + } + } else { + // No execution payload, treat as canonical for filtering purposes + payloadIsCanonical = true + } + + if filter.WithPayloadOrphaned == 0 && !payloadIsCanonical { + // only canonical payloads, skip orphaned + continue + } + if filter.WithPayloadOrphaned == 2 && payloadIsCanonical { + // only orphaned payloads, skip canonical + continue + } + } + cachedMatches = append(cachedMatches, cachedDbBlock{ slot: uint64(block.Slot), proposer: uint64(blockHeader.Message.ProposerIndex), @@ -863,7 +981,7 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes // reconstruct missing blocks from epoch duties // For slot/root filtering, we still need to check if we need missing blocks for that specific slot - shouldCheckMissing := filter.WithMissing != 0 && filter.Graffiti == "" && filter.ExtraData == "" && filter.WithOrphaned != 2 && filter.MinSyncParticipation == nil && filter.MaxSyncParticipation == nil && filter.MinExecTime == nil && filter.MaxExecTime == nil && filter.MinTxCount == nil && filter.MaxTxCount == nil && filter.MinBlobCount == nil && filter.MaxBlobCount == nil && len(filter.ForkIds) == 0 && filter.MinGasUsed == nil && filter.MaxGasUsed == nil && filter.MinGasLimit == nil && filter.MaxGasLimit == nil && filter.MinBlockSize == nil && filter.MaxBlockSize == nil && 
filter.WithMevBlock == 0 + shouldCheckMissing := filter.WithMissing != 0 && filter.Graffiti == "" && filter.ExtraData == "" && filter.WithOrphaned != 2 && filter.MinSyncParticipation == nil && filter.MaxSyncParticipation == nil && filter.MinExecTime == nil && filter.MaxExecTime == nil && filter.MinTxCount == nil && filter.MaxTxCount == nil && filter.MinBlobCount == nil && filter.MaxBlobCount == nil && len(filter.ForkIds) == 0 && filter.BuilderIndex == nil && filter.WithPayloadOrphaned != 2 && len(filter.EthBlockParentHash) == 0 && filter.MinGasUsed == nil && filter.MaxGasUsed == nil && filter.MinGasLimit == nil && filter.MaxGasLimit == nil && filter.MinBlockSize == nil && filter.MaxBlockSize == nil && filter.WithMevBlock == 0 && filter.ProposerIndex == nil && filter.ProposerName == "" // If filtering by slot, only check missing for that specific slot if filter.Slot != nil { diff --git a/services/chainservice_builder.go b/services/chainservice_builder.go new file mode 100644 index 000000000..49a4c9162 --- /dev/null +++ b/services/chainservice_builder.go @@ -0,0 +1,263 @@ +package services + +import ( + "bytes" + "context" + "slices" + "sort" + + "github.com/attestantio/go-eth2-client/spec/gloas" + "github.com/attestantio/go-eth2-client/spec/phase0" + + "github.com/ethpandaops/dora/db" + "github.com/ethpandaops/dora/dbtypes" + "github.com/ethpandaops/dora/indexer/beacon" +) + +// BuilderIndexFlag separates builder indices from validator indices +// A validator/builder index with this flag set is a builder index +const BuilderIndexFlag = beacon.BuilderIndexFlag + +type BuilderWithIndex struct { + Index gloas.BuilderIndex + Builder *gloas.Builder + Superseded bool +} + +// GetFilteredBuilderSet returns builders matching the filter criteria +func (bs *ChainService) GetFilteredBuilderSet(ctx context.Context, filter *dbtypes.BuilderFilter, withBalance bool) ([]BuilderWithIndex, uint64) { + var overrideForkId *beacon.ForkKey + + canonicalHead := 
bs.beaconIndexer.GetCanonicalHead(overrideForkId) + if canonicalHead == nil { + return nil, 0 + } + + var balances []phase0.Gwei + if withBalance { + balances = bs.beaconIndexer.GetRecentBuilderBalances(overrideForkId) + } + currentEpoch := bs.consensusPool.GetChainState().CurrentEpoch() + + cachedResults := make([]BuilderWithIndex, 0, 1000) + cachedIndexes := map[uint64]bool{} + + // Get matching entries from cached builders + bs.beaconIndexer.StreamActiveBuilderDataForRoot(canonicalHead.Root, false, ¤tEpoch, func(index gloas.BuilderIndex, flags uint16, activeData *beacon.BuilderData, builder *gloas.Builder) error { + if builder == nil { + return nil + } + if filter.MinIndex != nil && uint64(index) < *filter.MinIndex { + return nil + } + if filter.MaxIndex != nil && uint64(index) > *filter.MaxIndex { + return nil + } + if len(filter.PubKey) > 0 { + pubkeylen := min(len(filter.PubKey), 48) + if !bytes.Equal(builder.PublicKey[:pubkeylen], filter.PubKey) { + return nil + } + } + if len(filter.ExecutionAddress) > 0 { + if !bytes.Equal(builder.ExecutionAddress[:], filter.ExecutionAddress) { + return nil + } + } + + if len(filter.Status) > 0 { + builderStatus := getBuilderStatus(builder, currentEpoch, false) + if !slices.Contains(filter.Status, builderStatus) { + return nil + } + } + + cachedResults = append(cachedResults, BuilderWithIndex{ + Index: index, + Builder: builder, + }) + cachedIndexes[uint64(index)] = true + + return nil + }) + + // Get matching entries from DB + dbIndexes, err := db.GetBuilderIndexesByFilter(ctx, *filter, uint64(currentEpoch)) + if err != nil { + bs.logger.Warnf("error getting builder indexes by filter: %v", err) + return nil, 0 + } + + // Sort results + var sortFn func(builderA, builderB BuilderWithIndex) bool + switch filter.OrderBy { + case dbtypes.BuilderOrderIndexAsc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Index < builderB.Index + } + case dbtypes.BuilderOrderIndexDesc: + sortFn = func(builderA, 
builderB BuilderWithIndex) bool { + return builderA.Index > builderB.Index + } + case dbtypes.BuilderOrderPubKeyAsc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return bytes.Compare(builderA.Builder.PublicKey[:], builderB.Builder.PublicKey[:]) < 0 + } + case dbtypes.BuilderOrderPubKeyDesc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return bytes.Compare(builderA.Builder.PublicKey[:], builderB.Builder.PublicKey[:]) > 0 + } + case dbtypes.BuilderOrderBalanceAsc: + if balances == nil { + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Builder.Balance < builderB.Builder.Balance + } + } else { + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return balances[builderA.Index] < balances[builderB.Index] + } + sort.Slice(dbIndexes, func(i, j int) bool { + if dbIndexes[i] >= uint64(len(balances)) || dbIndexes[j] >= uint64(len(balances)) { + return dbIndexes[i] < dbIndexes[j] + } + return balances[dbIndexes[i]] < balances[dbIndexes[j]] + }) + } + case dbtypes.BuilderOrderBalanceDesc: + if balances == nil { + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Builder.Balance > builderB.Builder.Balance + } + } else { + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return balances[builderA.Index] > balances[builderB.Index] + } + sort.Slice(dbIndexes, func(i, j int) bool { + if dbIndexes[i] >= uint64(len(balances)) || dbIndexes[j] >= uint64(len(balances)) { + return dbIndexes[i] > dbIndexes[j] + } + return balances[dbIndexes[i]] > balances[dbIndexes[j]] + }) + } + case dbtypes.BuilderOrderDepositEpochAsc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Builder.DepositEpoch < builderB.Builder.DepositEpoch + } + case dbtypes.BuilderOrderDepositEpochDesc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Builder.DepositEpoch > builderB.Builder.DepositEpoch + } + case dbtypes.BuilderOrderWithdrawableEpochAsc: + 
sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Builder.WithdrawableEpoch < builderB.Builder.WithdrawableEpoch + } + case dbtypes.BuilderOrderWithdrawableEpochDesc: + sortFn = func(builderA, builderB BuilderWithIndex) bool { + return builderA.Builder.WithdrawableEpoch > builderB.Builder.WithdrawableEpoch + } + } + + sort.Slice(cachedResults, func(i, j int) bool { + return sortFn(cachedResults[i], cachedResults[j]) + }) + + // Stream builder set from db and merge cached results + resCap := filter.Limit + if resCap == 0 { + resCap = uint64(len(cachedResults) + len(dbIndexes)) + } + result := make([]BuilderWithIndex, 0, resCap) + cachedIndex := 0 + matchingCount := uint64(0) + resultCount := uint64(0) + dbEntryCount := uint64(0) + + db.StreamBuildersByIndexes(ctx, dbIndexes, func(dbBuilder *dbtypes.Builder) bool { + dbEntryCount++ + builderWithIndex := BuilderWithIndex{ + Index: gloas.BuilderIndex(dbBuilder.BuilderIndex), + Builder: beacon.UnwrapDbBuilder(dbBuilder), + Superseded: dbBuilder.Superseded, + } + + for cachedIndex < len(cachedResults) && (cachedResults[cachedIndex].Index == builderWithIndex.Index || sortFn(cachedResults[cachedIndex], builderWithIndex)) { + if matchingCount >= filter.Offset { + resultBuilder := cachedResults[cachedIndex] + if balances != nil && uint64(resultBuilder.Index) < uint64(len(balances)) { + resultBuilder.Builder.Balance = balances[resultBuilder.Index] + } + result = append(result, resultBuilder) + resultCount++ + } + matchingCount++ + cachedIndex++ + + if filter.Limit > 0 && resultCount >= filter.Limit { + return false // stop streaming + } + } + + if cachedIndexes[dbBuilder.BuilderIndex] { + return true // skip this index, cache entry is newer + } + + if matchingCount >= filter.Offset { + if !builderWithIndex.Superseded && balances != nil && dbBuilder.BuilderIndex < uint64(len(balances)) { + builderWithIndex.Builder.Balance = balances[dbBuilder.BuilderIndex] + } + result = append(result, 
builderWithIndex) + resultCount++ + } + matchingCount++ + + if filter.Limit > 0 && resultCount >= filter.Limit { + return false // stop streaming + } + + return true // get more from db + }) + + for cachedIndex < len(cachedResults) && (filter.Limit == 0 || resultCount < filter.Limit) { + if matchingCount >= filter.Offset { + resultBuilder := cachedResults[cachedIndex] + if balances != nil && uint64(resultBuilder.Index) < uint64(len(balances)) { + resultBuilder.Builder.Balance = balances[resultBuilder.Index] + } + result = append(result, resultBuilder) + resultCount++ + } + matchingCount++ + cachedIndex++ + } + + // Add remaining cached results + matchingCount += uint64(len(cachedResults) - cachedIndex) + + // Add remaining db results + remainingDbCount := uint64(0) + for i := dbEntryCount; i < uint64(len(dbIndexes)); i++ { + if cachedIndexes[dbIndexes[i]] { + continue + } + remainingDbCount++ + } + matchingCount += remainingDbCount + + return result, matchingCount +} + +// GetBuilderByIndex returns the builder by index +func (bs *ChainService) GetBuilderByIndex(index gloas.BuilderIndex) *gloas.Builder { + return bs.beaconIndexer.GetBuilderByIndex(index, nil) +} + +// getBuilderStatus determines the status of a builder +func getBuilderStatus(builder *gloas.Builder, currentEpoch phase0.Epoch, superseded bool) dbtypes.BuilderStatus { + if superseded { + return dbtypes.BuilderStatusSupersededFilter + } + if builder.WithdrawableEpoch <= currentEpoch { + return dbtypes.BuilderStatusExitedFilter + } + return dbtypes.BuilderStatusActiveFilter +} diff --git a/services/chainservice_deposits.go b/services/chainservice_deposits.go index 5491a2530..073039dc7 100644 --- a/services/chainservice_deposits.go +++ b/services/chainservice_deposits.go @@ -306,7 +306,8 @@ func (bs *ChainService) GetDepositOperationsByFilter(ctx context.Context, filter if len(txFilter.WithdrawalAddress) > 0 { wdcreds := depositWithTx.WithdrawalCredentials - if wdcreds[0] != 0x01 && wdcreds[0] != 0x02 { + 
// 0x01 = ETH1, 0x02 = compounding, 0x03 = builder deposit + if wdcreds[0] != 0x01 && wdcreds[0] != 0x02 && wdcreds[0] != 0x03 { continue } @@ -527,11 +528,11 @@ func (bs *ChainService) GetIndexedDepositQueue(ctx context.Context, headBlock *b indexedQueue.QueueEstimation = queueEpoch if lastNormalDeposit != nil && !bytes.Equal(lastNormalDeposit.PendingDeposit.Pubkey[:], lastIncludedDeposit.PublicKey[:]) { - // something is bad, return empty queue - logrus.Warnf("ChainService.GetIndexedDepositQueue: last included deposit not found in queue, %x != %x", lastNormalDeposit.PendingDeposit.Pubkey[:], lastIncludedDeposit.PublicKey[:]) - return &IndexedDepositQueue{ - Queue: []*IndexedDepositQueueEntry{}, - } + // Mismatch between queue and included deposits - this can happen if there are + // builder deposits (0x03) that skip the queue. Log a debug message but still return + // the queue to show useful information. The deposit indexes might not be perfectly + // matched but the queue itself is still valid. 
+ logrus.Debugf("ChainService.GetIndexedDepositQueue: last included deposit not found in queue (possibly due to builder deposits), %x != %x", lastNormalDeposit.PendingDeposit.Pubkey[:], lastIncludedDeposit.PublicKey[:]) } return indexedQueue @@ -599,7 +600,17 @@ func (bs *ChainService) getLastIncludedDeposit(ctx context.Context, headRoot pha } if len(deposits) > 0 { - lastDeposits = deposits + // Filter out builder deposits (0x03) as they skip the queue + filteredDeposits := make([]*dbtypes.Deposit, 0, len(deposits)) + for _, deposit := range deposits { + if len(deposit.WithdrawalCredentials) > 0 && deposit.WithdrawalCredentials[0] == 0x03 { + continue // Skip builder deposits + } + filteredDeposits = append(filteredDeposits, deposit) + } + if len(filteredDeposits) > 0 { + lastDeposits = filteredDeposits + } } } } diff --git a/static/css/layout.css b/static/css/layout.css index e0df61b79..665ee04d7 100644 --- a/static/css/layout.css +++ b/static/css/layout.css @@ -329,6 +329,26 @@ span.validator-label { padding: 1px .25rem; } +.badge.split-warning { + background: linear-gradient( + 90deg, + rgba(255,255,255,0) 0%, + rgba(255,255,255,0) 50%, + rgba(255,193,7,1) 50%, + rgba(255,193,7,1) 100% + ); +} + +.badge.split-info { + background: linear-gradient( + 90deg, + rgba(255,255,255,0) 0%, + rgba(255,255,255,0) 50%, + rgba(13,202,240,1) 50%, + rgba(13,202,240,1) 100% + ); +} + .text-monospace { font-family: var(--bs-font-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace) !important; } diff --git a/templates/builder/builder.html b/templates/builder/builder.html new file mode 100644 index 000000000..b67384236 --- /dev/null +++ b/templates/builder/builder.html @@ -0,0 +1,235 @@ +{{ define "page" }} +
+
+

Builder {{ formatBuilderWithIndex .Index .Name }}

+ +
+ +
+
+ +
+
+ Status: +
+
+ +
+
+
+
Deposited
+
+ + +
+
+
+
+
+
+
+ +
+
+
+
Active
+
+ + + +
+
+
+
+
+
+
+
+
+ {{ if .ShowWithdrawable }} + + {{ end }} +
+
+
+
+
Exited
+
+ +
+
+
+
+
+
+
+
+
+
+
+ +
+
Index:
+
+ {{ formatBuilderWithIndex .Index .Name }} + +
+
+
+
Public Key:
+
+ 0x{{ printf "%x" .PublicKey }} + +
+
+
+
Execution Address:
+
+ {{ ethAddressLink .ExecutionAddress }} +
+
+
+
Status:
+
+ {{ if eq .State "Active" }} + Active + {{ else if eq .State "Exited" }} + Exited + {{ else if eq .State "Superseded" }} + Superseded + {{ else }} + {{ .State }} + {{ end }} +
+
+
+
Balance:
+
+ {{ formatEthFromGwei .Balance }} + +
+
+
+
Version:
+
+ {{ .Version }} +
+
+ {{ if .ShowDeposit }} +
+
Deposit Epoch:
+
+ {{ formatAddCommas .DepositEpoch }} + ({{ formatRecentTimeShort .DepositTs }}) +
+
+ {{ end }} + {{ if .ShowWithdrawable }} +
+
Withdrawable Epoch:
+
+ {{ formatAddCommas .WithdrawableEpoch }} + ({{ formatRecentTimeShort .WithdrawableTs }}) +
+
+ {{ end }} + +
+
+ + + + +
+
+ {{ if eq .TabView "blocks" }} + {{ template "recentBlocks" . }} + {{ end }} +
+
+ {{ if eq .TabView "bids" }} + {{ template "recentBids" . }} + {{ end }} +
+
+ {{ if eq .TabView "deposits" }} + {{ template "recentDeposits" . }} + {{ end }} +
+
+ +
+{{ end }} +{{ define "lazyPage" }} + {{ if eq .TabView "blocks" }} + {{ template "recentBlocks" . }} + {{ else if eq .TabView "bids" }} + {{ template "recentBids" . }} + {{ else if eq .TabView "deposits" }} + {{ template "recentDeposits" . }} + {{ else }} + Unknown tab + {{ end }} +{{ end }} +{{ define "js" }} + +{{ end }} +{{ define "css" }} + +{{ end }} diff --git a/templates/builder/notfound.html b/templates/builder/notfound.html new file mode 100644 index 000000000..b92472273 --- /dev/null +++ b/templates/builder/notfound.html @@ -0,0 +1,27 @@ +{{ define "js" }} +{{ end }} + +{{ define "css" }} +{{ end }} + +{{ define "page" }} +
+
+
+

Builder not found

+ +
+
+
+
+
Sorry, but we could not find the builder you are looking for. The builder may not exist or may not have been indexed yet.
+
+
+
+{{ end }} diff --git a/templates/builder/recentBids.html b/templates/builder/recentBids.html new file mode 100644 index 000000000..d3cbddb7b --- /dev/null +++ b/templates/builder/recentBids.html @@ -0,0 +1,57 @@ +{{ define "recentBids" }} +
+
+
+ + + + + + + + + + + + + + {{ if gt (len .RecentBids) 0 }} + {{ range $i, $bid := .RecentBids }} + + + + + + + + + + {{ end }} + {{ else }} + + + + + + {{ end }} + +
SlotTimeBlock HashGas LimitValueEL PaymentStatus
{{ formatAddCommas $bid.Slot }}{{ formatRecentTimeShort $bid.Ts }} + + 0x{{ printf "%x" $bid.BlockHash }} + + + {{ formatAddCommas $bid.GasLimit }}{{ formatEthFromGwei $bid.Value }}{{ formatEthFromGwei $bid.ElPayment }} + {{ if $bid.IsWinning }} + Won + {{ else }} + - + {{ end }} +
+
+ {{ template "timeline_svg" }} +
+
+
+
+
+{{ end }} diff --git a/templates/builder/recentBlocks.html b/templates/builder/recentBlocks.html new file mode 100644 index 000000000..c03749e4a --- /dev/null +++ b/templates/builder/recentBlocks.html @@ -0,0 +1,58 @@ +{{ define "recentBlocks" }} +
+
+
+ + + + + + + + + + + + + {{ if gt (len .RecentBlocks) 0 }} + {{ range $i, $block := .RecentBlocks }} + + + + + + + + + {{ end }} + {{ else }} + + + + + + {{ end }} + +
EpochSlotBlock HashStatusTimeValue
{{ formatAddCommas $block.Epoch }}{{ formatAddCommas $block.Slot }} + + 0x{{ printf "%x" $block.BlockHash }} + + + {{ if eq $block.Status 0 }} + Missing + {{ else if eq $block.Status 1 }} + Canonical + {{ else if eq $block.Status 2 }} + Orphaned + {{ else }} + Unknown + {{ end }} + {{ formatRecentTimeShort $block.Ts }}{{ formatEthFromGwei $block.Value }}
+
+ {{ template "timeline_svg" }} +
+
+
+
+
+{{ end }} diff --git a/templates/builder/recentDeposits.html b/templates/builder/recentDeposits.html new file mode 100644 index 000000000..19f3177f5 --- /dev/null +++ b/templates/builder/recentDeposits.html @@ -0,0 +1,52 @@ +{{ define "recentDeposits" }} +
+
+
+ + + + + + + + + + + {{ if gt (len .RecentDeposits) 0 }} + {{ range $i, $deposit := .RecentDeposits }} + + + + + + + {{ end }} + {{ else }} + + + + + + {{ end }} + +
TypeSlotTimeStatus
+ {{ if eq $deposit.Type "exit" }} + Voluntary Exit + {{ else }} + {{ $deposit.Type }} + {{ end }} + {{ formatAddCommas $deposit.SlotNumber }}{{ formatRecentTimeShort $deposit.Time }} + {{ if $deposit.Orphaned }} + Orphaned + {{ else }} + Included + {{ end }} +
+
+ {{ template "timeline_svg" }} +
+
+
+
+
+{{ end }} diff --git a/templates/builders/builders.html b/templates/builders/builders.html new file mode 100644 index 000000000..25869e1a2 --- /dev/null +++ b/templates/builders/builders.html @@ -0,0 +1,303 @@ +{{ define "page" }} +
+
+

Builders Overview

+ +
+ +
+
+ + {{ if not .IsDefaultSorting }}{{ end }} +
+
+ Builder Filters +
+
+
+
+
+
+
+ PubKey +
+
+ +
+
+
+
+ Index +
+
+ +
+
+
+
+ Execution Address +
+
+ +
+
+
+
+
+
+
+
+ Status +
+
+ +
+
+ +
+
+ +
+
+
+ +
+
+
+ +
+
+
+
+
+
+ +
+
+
+ + + + + + + + + + + + + {{ if gt .BuilderCount 0 }} + + {{ range $i, $builder := .Builders }} + + + + + + + + + + {{ end }} + + {{ else }} + + + + + + + + {{ end }} +
+ Index +
+ + +
+
+ Public Key +
+ + +
+
Execution Address + Balance +
+ + +
+
State + Deposit +
+ + +
+
+ Withdrawable +
+ + +
+
{{ formatAddCommas $builder.Index }}0x{{ printf "%x" $builder.PublicKey }}{{ ethAddressLink .ExecutionAddress }}{{ formatEthFromGwei $builder.Balance }}{{ $builder.State }} + {{- if $builder.ShowDeposit -}} + {{ formatRecentTimeShort $builder.DepositTs }} + (Epoch {{ formatAddCommas $builder.DepositEpoch }}) + {{- else -}} + - + {{- end -}} + + {{- if $builder.ShowWithdrawable -}} + {{ formatRecentTimeShort $builder.WithdrawableTs }} + (Epoch {{ formatAddCommas $builder.WithdrawableEpoch }}) + {{- else -}} + - + {{- end -}} +
+
+ {{ template "professor_svg" }} +
+
+
+ {{ if gt .TotalPages 1 }} +
+
+
+
Showing builder {{ .FirstBuilder }} to {{ .LastBuilder }}
+
+
+
+
+
+ + {{ range $key, $value := .UrlParams }} + {{ if ne $key "p" }} + + {{ end }} + {{ end }} + {{ if not .IsDefaultSorting }} + + {{ end }} +
+ + +
+
+
+
+ +
+
+
+ {{ end }} +
+ +
+
+{{ end }} +{{ define "js" }} + + +{{ end }} +{{ define "css" }} + + +{{ end }} diff --git a/templates/deposits/deposits.html b/templates/deposits/deposits.html index f9b27d0cb..cd6bd9f77 100644 --- a/templates/deposits/deposits.html +++ b/templates/deposits/deposits.html @@ -162,7 +162,11 @@
This table displays the deposits received by the Beac
{{ if $deposit.ValidatorExists }} - {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ if $deposit.IsBuilder }} + {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ end }} {{ else }} 0x{{ printf "%x" $deposit.PublicKey }} {{ end }} @@ -187,18 +191,18 @@
This table displays the deposits received by the Beac {{ end }} {{ if $deposit.IsQueued }} - Queued {{ end }} {{ if $deposit.InvalidSignature }} - @@ -295,7 +299,11 @@
This table displays the deposits made for validators
{{ if $deposit.ValidatorExists }} - {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ if $deposit.IsBuilder }} + {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ end }} {{ else }} 0x{{ printf "%x" $deposit.PublicKey }} {{ end }} @@ -410,7 +418,11 @@
This table displays deposits waiting to be activated
{{ if $deposit.ValidatorExists }} - {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ if $deposit.IsBuilder }} + {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ end }} {{ else }} 0x{{ printf "%x" $deposit.PublicKey }} {{ end }} diff --git a/templates/epoch/epoch.html b/templates/epoch/epoch.html index f047ee436..8f6a8b271 100644 --- a/templates/epoch/epoch.html +++ b/templates/epoch/epoch.html @@ -177,15 +177,15 @@

{{ else if $slot.Scheduled }} Scheduled {{ else if eq $slot.Status 1 }} - Proposed + Proposed {{ else if eq $slot.Status 2 }} - Orphaned + Orphaned {{ else if not $epoch.Synchronized }} ? {{ else if eq $slot.Status 0 }} Missed {{ else }} - Unknown + Unknown {{ end }} {{ formatRecentTimeShort $slot.Ts }} diff --git a/templates/exits/exits.html b/templates/exits/exits.html index 62217b0e6..e552fa0a8 100644 --- a/templates/exits/exits.html +++ b/templates/exits/exits.html @@ -138,7 +138,11 @@

This table displays the most recent voluntary exit re {{ formatRecentTimeShort $exit.Time }} - {{ formatValidatorWithIndex $exit.ValidatorIndex $exit.ValidatorName }} + {{ if $exit.IsBuilder }} + {{ formatBuilderWithIndex $exit.ValidatorIndex $exit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $exit.ValidatorIndex $exit.ValidatorName }} + {{ end }}
diff --git a/templates/included_deposits/included_deposits.html b/templates/included_deposits/included_deposits.html index 240dd3684..e3a650abb 100644 --- a/templates/included_deposits/included_deposits.html +++ b/templates/included_deposits/included_deposits.html @@ -194,7 +194,11 @@

{{ if $deposit.ValidatorExists }} - {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ if $deposit.IsBuilder }} + {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ end }} {{ else }} 0x{{ printf "%x" $deposit.PublicKey }} {{ end }} diff --git a/templates/index/recentBlocks.html b/templates/index/recentBlocks.html index 47f381bc1..172084371 100644 --- a/templates/index/recentBlocks.html +++ b/templates/index/recentBlocks.html @@ -41,9 +41,9 @@
Genesis Missed - Proposed - Missed (Orphaned) - Unknown + Proposed + Missed (Orphaned) + Unknown @@ -74,11 +74,11 @@
Missed {{ else if eq .Status 1 }} - Proposed + Proposed {{ else if eq .Status 2 }} - Missed (Orphaned) + Missed (Orphaned) {{ else }} - Unknown + Unknown {{ end }} {{ formatRecentTimeShort $block.Ts }} diff --git a/templates/index/recentSlots.html b/templates/index/recentSlots.html index 118619238..7a47f7cd3 100644 --- a/templates/index/recentSlots.html +++ b/templates/index/recentSlots.html @@ -42,9 +42,9 @@
Genesis Missed - Proposed - Missed (Orphaned) - Unknown + Proposed + Missed (Orphaned) + Unknown @@ -97,11 +97,11 @@
Missed {{ else if eq .Status 1 }} - Proposed + Proposed {{ else if eq .Status 2 }} - Orphaned + Missed (Orphaned) {{ else }} - Unknown + Unknown {{ end }} {{ formatRecentTimeShort $slot.Ts }} diff --git a/templates/initiated_deposits/initiated_deposits.html b/templates/initiated_deposits/initiated_deposits.html index 35814a1e2..3e274b914 100644 --- a/templates/initiated_deposits/initiated_deposits.html +++ b/templates/initiated_deposits/initiated_deposits.html @@ -166,7 +166,11 @@

{{ if $deposit.ValidatorExists }} - {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ if $deposit.IsBuilder }} + {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ end }} {{ else }} 0x{{ printf "%x" $deposit.PublicKey }} {{ end }} diff --git a/templates/queued_deposits/queued_deposits.html b/templates/queued_deposits/queued_deposits.html index ed52a4250..88980d9a9 100644 --- a/templates/queued_deposits/queued_deposits.html +++ b/templates/queued_deposits/queued_deposits.html @@ -135,7 +135,11 @@

{{ if $deposit.ValidatorExists }} - {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ if $deposit.IsBuilder }} + {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }} + {{ end }} {{ else }} 0x{{ printf "%x" $deposit.PublicKey }} {{ end }} diff --git a/templates/slot/bids.html b/templates/slot/bids.html new file mode 100644 index 000000000..37f6184f8 --- /dev/null +++ b/templates/slot/bids.html @@ -0,0 +1,42 @@ +{{ define "block_bids" }} +
+ + + + + + + + + + + + + + {{ range $i, $bid := .Block.Bids }} + + + + + + + + + + {{ end }} + +
BuilderBlock HashFee RecipientGas LimitValueEL PaymentTotal
+ {{ if $bid.IsSelfBuilt }} + Self-built + {{ else }} + {{ formatValidatorWithIndex $bid.BuilderIndex $bid.BuilderName }} + {{ end }} + {{ if $bid.IsWinning }}Winner{{ end }} + +
+ 0x{{ printf "%x" $bid.BlockHash }} + +
+
{{ ethAddressLink $bid.FeeRecipient }}{{ formatAddCommas $bid.GasLimit }}{{ formatEthFromGwei $bid.Value }}{{ formatEthFromGwei $bid.ElPayment }}{{ formatEthFromGwei $bid.TotalValue }}
+
+{{ end }} diff --git a/templates/slot/overview.html b/templates/slot/overview.html index 40c031a5a..eb5505275 100644 --- a/templates/slot/overview.html +++ b/templates/slot/overview.html @@ -204,15 +204,27 @@
{{ end }} - {{ if .Block.ExecutionData }} + + {{ if .Block.PayloadHeader }} {{ $block := .Block }} - {{ with .Block.ExecutionData }} + {{ with .Block.PayloadHeader }}
-
Execution Payload:
+
Payload Header:
+
-
Block Number:
-
{{ ethBlockLink .BlockNumber }}
+
Payload Status:
+
+ {{ if eq .PayloadStatus 0 }} + Missing + {{ else if eq .PayloadStatus 1 }} + Revealed + {{ else if eq .PayloadStatus 2 }} + Orphaned + {{ else }} + Unknown + {{ end }} +
@@ -226,11 +238,85 @@
Parent Hash:
- 0x{{ printf "%x" .ParentHash }} - + {{ ethBlockHashLink .ParentBlockHash }} + +
+
+ +
+
Builder:
+
+ {{ formatBuilderWithIndex .BuilderIndex .BuilderName }}
+
+
Block Value:
+
+ {{ formatEthFromGwei .Value }} +
+
+ +
+
Gas Limit:
+
+ {{ .GasLimit }} +
+
+ +
+
Blob KZG Commitments:
+
+ {{ len .BlobKZGCommitments }} blob{{ if ne (len .BlobKZGCommitments) 1 }}s{{ end }} + {{ if .BlobKZGCommitments }} + +
+ {{ range $i, $c := .BlobKZGCommitments }} +
+ {{ $i }}: + 0x{{ printf "%x" $c }} + +
+ {{ end }} +
+ {{ end }} +
+
+
+
+ {{ end }} + {{ end }} + {{ if .Block.ExecutionData }} + {{ $block := .Block }} + {{ with .Block.ExecutionData }} +
+
Execution Payload:
+
+
+
Block Number:
+
{{ ethBlockLink .BlockNumber }}
+
+ + {{ if not $block.PayloadHeader }} +
+
Block Hash:
+
+ {{ ethBlockHashLink .BlockHash }} + +
+
+ +
+
Parent Hash:
+
+ 0x{{ printf "%x" .ParentHash }} + +
+
+ {{ end }} + {{ if .StateRoot }}
State Root:
@@ -281,10 +367,12 @@
-
-
Gas Limit:
-
{{ formatAddCommas .GasLimit }}
-
+ {{ if not $block.PayloadHeader }} +
+
Gas Limit:
+
{{ formatAddCommas .GasLimit }}
+
+ {{ end }}
Base fee per gas:
diff --git a/templates/slot/ptc_votes.html b/templates/slot/ptc_votes.html new file mode 100644 index 000000000..6f3d13f5e --- /dev/null +++ b/templates/slot/ptc_votes.html @@ -0,0 +1,106 @@ +{{ define "block_ptc_votes" }} +
+ {{ if .Block.PtcVotes }} +
+ + PTC (Payload Timeliness Committee) votes included in this block are for + slot {{ .Block.PtcVotes.VotedSlot }} (the previous slot). + {{ if .Block.PtcVotes.VotedBlockRoot }} +
Voted block root: 0x{{ printf "%x" .Block.PtcVotes.VotedBlockRoot }} + {{ end }} +
+ +
+
+
+
+
{{ formatParticipation .Block.PtcVotes.Participation }}
+ Participation +
+
+
+
+
+
+
{{ len .Block.PtcVotes.Aggregates }}
+ Aggregates +
+
+
+
+
+
+
{{ .Block.PtcVotes.TotalPtcSize }}
+ Committee Size +
+
+
+
+ +
Vote Aggregates
+ {{ range $i, $agg := .Block.PtcVotes.Aggregates }} +
+
+
+ + {{ if and $agg.PayloadPresent $agg.BlobDataAvailable }} + Payload + Blob Available + {{ else if $agg.PayloadPresent }} + Payload Only + {{ else if $agg.BlobDataAvailable }} + Blob Only + {{ else }} + Unavailable + {{ end }} + + + {{ $agg.VoteCount }} vote{{ if ne $agg.VoteCount 1 }}s{{ end }} + +
+
+ + PayloadPresent: {{ if $agg.PayloadPresent }}Yes{{ else }}No{{ end }} + + + BlobDataAvailable: {{ if $agg.BlobDataAvailable }}Yes{{ else }}No{{ end }} + +
+
+
+ {{ if gt (len $agg.Validators) 0 }} +
+ Participating Validators ({{ len $agg.Validators }}): +
+
+ {{ range $j, $vidx := $agg.Validators }} + + {{ formatValidator $vidx (index $.Block.ValidatorNames $vidx) }} + + {{ end }} +
+ {{ else }} +

No validators in this aggregate

+ {{ end }} +
+
+ {{ end }} + + {{ if gt (len .Block.PtcVotes.PtcCommittee) 0 }} +
Full PTC Committee
+
+
+
+ {{ range $i, $member := .Block.PtcVotes.PtcCommittee }} + + {{ formatValidator $member.Index $member.Name }} + + {{ end }} +
+
+
+ {{ end }} + {{ else }} +

No PTC vote data available.

+ {{ end }} +
+{{ end }} diff --git a/templates/slot/slot.html b/templates/slot/slot.html index 74fe5c764..fc9bf9b7e 100644 --- a/templates/slot/slot.html +++ b/templates/slot/slot.html @@ -43,9 +43,19 @@

Transactions {{ .Block.TransactionsCount }} {{ end }} + {{ if gt .Block.BidsCount 0 }} + + {{ end }} + {{ if gt .Block.PtcVotesCount 0 }} + + {{ end }} {{ if gt .Block.DepositsCount 0 }}

{{ end }} + {{ if gt .Block.BidsCount 0 }} +
+
+
+
+

Showing {{ .Block.BidsCount }} Execution Payload Bids

+
+
+ {{ template "block_bids" . }} +
+
+ {{ end }} + {{ if gt .Block.PtcVotesCount 0 }} +
+
+
+
+

Showing {{ .Block.PtcVotesCount }} PTC Votes (for slot {{ .Block.PtcVotes.VotedSlot }})

+
+
+ {{ template "block_ptc_votes" . }} +
+
+ {{ end }} {{ if .Block }}
diff --git a/templates/slot/voluntary_exits.html b/templates/slot/voluntary_exits.html index 0ffbc0333..187999d64 100644 --- a/templates/slot/voluntary_exits.html +++ b/templates/slot/voluntary_exits.html @@ -13,7 +13,13 @@ {{ range $i, $exit := .Block.VoluntaryExits }} {{ $i }} - {{ formatValidatorWithIndex $exit.ValidatorIndex $exit.ValidatorName }} + + {{ if $exit.IsBuilder }} + {{ formatBuilderWithIndex $exit.ValidatorIndex $exit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $exit.ValidatorIndex $exit.ValidatorName }} + {{ end }} + {{ $exit.Epoch }} 0x{{ printf "%x" $exit.Signature }} diff --git a/templates/slots/slots.html b/templates/slots/slots.html index fbfe82b19..596d7d6f5 100644 --- a/templates/slots/slots.html +++ b/templates/slots/slots.html @@ -132,9 +132,9 @@

Slots

{{ if eq $slot.Slot 0 }} Genesis {{ else if eq $slot.Status 1 }} - Proposed + Proposed {{ else if eq $slot.Status 2 }} - Orphaned + Missed (Orphaned) {{ else if $slot.Scheduled }} Scheduled {{ else if not $slot.Synchronized }} @@ -142,7 +142,7 @@

Slots

{{ else if eq $slot.Status 0 }} Missed {{ else }} - Unknown + Unknown {{ end }} {{ end }} diff --git a/templates/slots_filtered/slots_filtered.html b/templates/slots_filtered/slots_filtered.html index 4afd650f6..5967e6af7 100644 --- a/templates/slots_filtered/slots_filtered.html +++ b/templates/slots_filtered/slots_filtered.html @@ -310,9 +310,9 @@

Filtered Slots

{{- if eq $slot.Slot 0 }} Genesis {{- else if eq $slot.Status 1 }} - Proposed + Proposed {{- else if eq $slot.Status 2 }} - Orphaned + Missed (Orphaned) {{- else if $slot.Scheduled }} Scheduled {{- else if not $slot.Synchronized }} @@ -320,7 +320,7 @@

Filtered Slots

{{- else if eq $slot.Status 0 }} Missed {{- else }} - Unknown + Unknown {{- end }} {{- end }} diff --git a/templates/validator_slots/slots.html b/templates/validator_slots/slots.html index 214945920..ec6932e5b 100644 --- a/templates/validator_slots/slots.html +++ b/templates/validator_slots/slots.html @@ -71,16 +71,16 @@

Validator {{ format {{ if eq $slot.Slot 0 }} Genesis + {{ else if eq $slot.Status 1 }} + Proposed + {{ else if eq $slot.Status 2 }} + Missed (Orphaned) {{ else if $slot.Scheduled }} Scheduled {{ else if eq $slot.Status 0 }} Missed - {{ else if eq $slot.Status 1 }} - Proposed - {{ else if eq $slot.Status 2 }} - Orphaned {{ else }} - Unknown + Unknown {{ end }} {{ formatRecentTimeShort $slot.Ts }} diff --git a/templates/voluntary_exits/voluntary_exits.html b/templates/voluntary_exits/voluntary_exits.html index d7fb14c17..93b48154d 100644 --- a/templates/voluntary_exits/voluntary_exits.html +++ b/templates/voluntary_exits/voluntary_exits.html @@ -139,7 +139,13 @@

{{ formatAddCommas $voluntaryExit.SlotNumber }} {{ end }} {{ formatRecentTimeShort $voluntaryExit.Time }} - {{ formatValidatorWithIndex $voluntaryExit.ValidatorIndex $voluntaryExit.ValidatorName }} + + {{ if $voluntaryExit.IsBuilder }} + {{ formatBuilderWithIndex $voluntaryExit.ValidatorIndex $voluntaryExit.ValidatorName }} + {{ else }} + {{ formatValidatorWithIndex $voluntaryExit.ValidatorIndex $voluntaryExit.ValidatorName }} + {{ end }} +
diff --git a/types/config.go b/types/config.go index 4a5dbb73a..fd340fae5 100644 --- a/types/config.go +++ b/types/config.go @@ -162,9 +162,10 @@ type Config struct { Database DatabaseConfig `yaml:"database"` BlockDb struct { - Engine string `yaml:"engine" envconfig:"BLOCKDB_ENGINE"` + Engine string `yaml:"engine" envconfig:"BLOCKDB_ENGINE"` // "pebble", "s3", or "tiered" Pebble PebbleBlockDBConfig `yaml:"pebble"` S3 S3BlockDBConfig `yaml:"s3"` + Tiered TieredBlockDBConfig `yaml:"tiered"` // For tiered storage (Pebble cache + S3 backend) } `yaml:"blockDb"` KillSwitch struct { @@ -256,19 +257,45 @@ type PgsqlWriterDatabaseConfig struct { MaxIdleConns int `yaml:"maxIdleConns" envconfig:"DATABASE_PGSQL_WRITER_MAX_IDLE_CONNS"` } +// BlockDbRetentionConfig configures per-object-type retention behavior. +type BlockDbRetentionConfig struct { + Enabled bool `yaml:"enabled"` + RetentionTime time.Duration `yaml:"retentionTime"` // For age-based cleanup + MaxSize int64 `yaml:"maxSize"` // Size limit in MB (0 = unlimited) + CleanupMode string `yaml:"cleanupMode"` // "age" or "lru" +} + +// PebbleBlockDBConfig configures the Pebble (local) storage engine. 
type PebbleBlockDBConfig struct { - Path string `yaml:"path" envconfig:"BLOCKDB_ROCKSDB_PATH"` - CacheSize int `yaml:"cacheSize" envconfig:"BLOCKDB_ROCKSDB_CACHE_SIZE"` + Path string `yaml:"path" envconfig:"BLOCKDB_PEBBLE_PATH"` + CacheSize int `yaml:"cacheSize" envconfig:"BLOCKDB_PEBBLE_CACHE_SIZE"` // Pebble internal cache in MB + + // Per-object-type retention configuration (used in tiered mode) + HeaderRetention BlockDbRetentionConfig `yaml:"headerRetention"` + BodyRetention BlockDbRetentionConfig `yaml:"bodyRetention"` + PayloadRetention BlockDbRetentionConfig `yaml:"payloadRetention"` + BalRetention BlockDbRetentionConfig `yaml:"balRetention"` + + // Cleanup configuration + CleanupInterval time.Duration `yaml:"cleanupInterval" envconfig:"BLOCKDB_PEBBLE_CLEANUP_INTERVAL"` } +// S3BlockDBConfig configures the S3 (remote) storage engine. type S3BlockDBConfig struct { - Endpoint string `yaml:"endpoint" envconfig:"BLOCKDB_S3_ENDPOINT"` - Secure YamlBool `yaml:"secure" envconfig:"BLOCKDB_S3_SECURE"` - Bucket string `yaml:"bucket" envconfig:"BLOCKDB_S3_BUCKET"` - Region string `yaml:"region" envconfig:"BLOCKDB_S3_REGION"` - AccessKey string `yaml:"accessKey" envconfig:"BLOCKDB_S3_ACCESS_KEY"` - SecretKey string `yaml:"secretKey" envconfig:"BLOCKDB_S3_SECRET_KEY"` - Path string `yaml:"path" envconfig:"BLOCKDB_S3_PATH"` + Endpoint string `yaml:"endpoint" envconfig:"BLOCKDB_S3_ENDPOINT"` + Secure YamlBool `yaml:"secure" envconfig:"BLOCKDB_S3_SECURE"` + Bucket string `yaml:"bucket" envconfig:"BLOCKDB_S3_BUCKET"` + Region string `yaml:"region" envconfig:"BLOCKDB_S3_REGION"` + AccessKey string `yaml:"accessKey" envconfig:"BLOCKDB_S3_ACCESS_KEY"` + SecretKey string `yaml:"secretKey" envconfig:"BLOCKDB_S3_SECRET_KEY"` + Path string `yaml:"path" envconfig:"BLOCKDB_S3_PATH"` + EnableRangeRequests bool `yaml:"enableRangeRequests" envconfig:"BLOCKDB_S3_ENABLE_RANGE_REQUESTS"` // Use HTTP Range requests for selective loading +} + +// TieredBlockDBConfig configures tiered storage 
(Pebble cache + S3 backend). +type TieredBlockDBConfig struct { + Pebble PebbleBlockDBConfig `yaml:"pebble"` + S3 S3BlockDBConfig `yaml:"s3"` } // YamlBool is a bool type that can be unmarshalled from both diff --git a/types/models/builders.go b/types/models/builders.go new file mode 100644 index 000000000..5bb02a877 --- /dev/null +++ b/types/models/builders.go @@ -0,0 +1,120 @@ +package models + +import ( + "time" +) + +// BuildersPageData is a struct to hold info for the builders page +type BuildersPageData struct { + FilterPubKey string `json:"filter_pubkey"` + FilterIndex string `json:"filter_index"` + FilterExecutionAddr string `json:"filter_execution_addr"` + FilterStatus string `json:"filter_status"` + FilterStatusOpts []BuildersPageDataStatusOption `json:"filter_status_opts"` + + Builders []*BuildersPageDataBuilder `json:"builders"` + BuilderCount uint64 `json:"builder_count"` + FirstBuilder uint64 `json:"first_builder"` + LastBuilder uint64 `json:"last_builder"` + Sorting string `json:"sorting"` + IsDefaultSorting bool `json:"default_sorting"` + IsDefaultPage bool `json:"default_page"` + TotalPages uint64 `json:"total_pages"` + PageSize uint64 `json:"page_size"` + CurrentPageIndex uint64 `json:"page_index"` + PrevPageIndex uint64 `json:"prev_page_index"` + NextPageIndex uint64 `json:"next_page_index"` + LastPageIndex uint64 `json:"last_page_index"` + FilteredPageLink string `json:"filtered_page_link"` + + UrlParams map[string]string `json:"url_params"` +} + +type BuildersPageDataStatusOption struct { + Status string `json:"status"` + Count uint64 `json:"count"` +} + +type BuildersPageDataBuilder struct { + Index uint64 `json:"index"` + PublicKey []byte `json:"pubkey"` + ExecutionAddress []byte `json:"execution_address"` + Balance uint64 `json:"balance"` + State string `json:"state"` + ShowDeposit bool `json:"show_deposit"` + DepositTs time.Time `json:"deposit_ts"` + DepositEpoch uint64 `json:"deposit_epoch"` + ShowWithdrawable bool 
`json:"show_withdrawable"` + WithdrawableTs time.Time `json:"withdrawable_ts"` + WithdrawableEpoch uint64 `json:"withdrawable_epoch"` +} + +// BuilderPageData holds data for the builder details page +type BuilderPageData struct { + CurrentEpoch uint64 `json:"current_epoch"` + Index uint64 `json:"index"` + Name string `json:"name"` + PublicKey []byte `json:"pubkey"` + Balance uint64 `json:"balance"` + ExecutionAddress []byte `json:"execution_address"` + Version uint8 `json:"version"` + State string `json:"state"` // "Active", "Exited", "Superseded" + + // Deposit lifecycle + ShowDeposit bool `json:"show_deposit"` + DepositEpoch uint64 `json:"deposit_epoch"` + DepositTs time.Time `json:"deposit_ts"` + + // Withdrawable lifecycle + ShowWithdrawable bool `json:"show_withdrawable"` + WithdrawableEpoch uint64 `json:"withdrawable_epoch"` + WithdrawableTs time.Time `json:"withdrawable_ts"` + + IsSuperseded bool `json:"is_superseded"` + + // Tab control + TabView string `json:"tab_view"` + GloasIsActive bool `json:"gloas_is_active"` + + // Tab data (loaded conditionally) + RecentBlocks []*BuilderPageDataBlock `json:"recent_blocks"` + RecentBids []*BuilderPageDataBid `json:"recent_bids"` + RecentDeposits []*BuilderPageDataDeposit `json:"recent_deposits"` +} + +// BuilderPageDataBlock represents a block/payload built by this builder +type BuilderPageDataBlock struct { + Epoch uint64 `json:"epoch"` + Slot uint64 `json:"slot"` + Ts time.Time `json:"ts"` + BlockRoot []byte `json:"block_root"` + BlockHash []byte `json:"block_hash"` + Status uint16 `json:"status"` // PayloadStatus + FeeRecipient []byte `json:"fee_recipient"` + GasLimit uint64 `json:"gas_limit"` + Value uint64 `json:"value"` + ElPayment uint64 `json:"el_payment"` +} + +// BuilderPageDataBid represents a bid submitted by this builder +type BuilderPageDataBid struct { + Slot uint64 `json:"slot"` + Ts time.Time `json:"ts"` + ParentRoot []byte `json:"parent_root"` + ParentHash []byte `json:"parent_hash"` + BlockHash 
[]byte `json:"block_hash"` + FeeRecipient []byte `json:"fee_recipient"` + GasLimit uint64 `json:"gas_limit"` + Value uint64 `json:"value"` + ElPayment uint64 `json:"el_payment"` + IsWinning bool `json:"is_winning"` +} + +// BuilderPageDataDeposit represents a builder deposit or voluntary exit +type BuilderPageDataDeposit struct { + Type string `json:"type"` // "exit" + SlotNumber uint64 `json:"slot"` + SlotRoot []byte `json:"slot_root"` + Time time.Time `json:"time"` + Orphaned bool `json:"orphaned"` +} diff --git a/types/models/deposits.go b/types/models/deposits.go index 83e836d31..f8b027e2a 100644 --- a/types/models/deposits.go +++ b/types/models/deposits.go @@ -43,6 +43,7 @@ type DepositsPageDataInitiatedDeposit struct { ValidatorExists bool `json:"validator_exists"` ValidatorIndex uint64 `json:"validator_index"` ValidatorName string `json:"validator_name"` + IsBuilder bool `json:"is_builder"` } type DepositsPageDataIncludedDeposit struct { @@ -69,6 +70,7 @@ type DepositsPageDataIncludedDeposit struct { ValidatorExists bool `json:"validator_exists"` ValidatorIndex uint64 `json:"validator_index"` ValidatorName string `json:"validator_name"` + IsBuilder bool `json:"is_builder"` } type DepositsPageDataIncludedDepositTxDetails struct { @@ -98,6 +100,7 @@ type DepositsPageDataQueuedDeposit struct { ValidatorExists bool `json:"validator_exists"` ValidatorIndex uint64 `json:"validator_index"` ValidatorName string `json:"validator_name"` + IsBuilder bool `json:"is_builder"` } type DepositsPageDataQueuedDepositTxDetails struct { diff --git a/types/models/epoch.go b/types/models/epoch.go index a4ae2b8c2..f308436de 100644 --- a/types/models/epoch.go +++ b/types/models/epoch.go @@ -45,6 +45,7 @@ type EpochPageDataSlot struct { Ts time.Time `json:"ts"` Scheduled bool `json:"scheduled"` Status uint8 `json:"status"` + PayloadStatus uint8 `json:"payload_status"` Proposer uint64 `json:"proposer"` ProposerName string `json:"proposer_name"` AttestationCount uint64 
`json:"attestation_count"` diff --git a/types/models/exits.go b/types/models/exits.go index 683aa2fa4..340d71f11 100644 --- a/types/models/exits.go +++ b/types/models/exits.go @@ -29,6 +29,7 @@ type ExitsPageDataRecentExit struct { Orphaned bool `json:"orphaned"` ValidatorIndex uint64 `json:"vindex"` ValidatorName string `json:"vname"` + IsBuilder bool `json:"is_builder"` PublicKey []byte `json:"pubkey"` WithdrawalCreds []byte `json:"wdcreds"` ValidatorStatus string `json:"vstatus"` diff --git a/types/models/included_deposits.go b/types/models/included_deposits.go index fd597f1d4..338292e81 100644 --- a/types/models/included_deposits.go +++ b/types/models/included_deposits.go @@ -62,6 +62,7 @@ type IncludedDepositsPageDataDeposit struct { ValidatorExists bool `json:"validator_exists"` ValidatorIndex uint64 `json:"validator_index"` ValidatorName string `json:"validator_name"` + IsBuilder bool `json:"is_builder"` } type IncludedDepositsPageDataDepositTxDetails struct { diff --git a/types/models/indexPage.go b/types/models/indexPage.go index 288c98f6b..3e6a56294 100644 --- a/types/models/indexPage.go +++ b/types/models/indexPage.go @@ -68,29 +68,31 @@ type IndexPageDataEpochs struct { } type IndexPageDataBlocks struct { - Epoch uint64 `json:"epoch"` - Slot uint64 `json:"slot"` - WithEthBlock bool `json:"has_block"` - EthBlock uint64 `json:"eth_block"` - EthBlockLink string `json:"eth_link"` - Ts time.Time `json:"ts"` - Proposer uint64 `json:"proposer"` - ProposerName string `json:"proposer_name"` - Status uint64 `json:"status"` - BlockRoot []byte `json:"block_root"` + Epoch uint64 `json:"epoch"` + Slot uint64 `json:"slot"` + WithEthBlock bool `json:"has_block"` + EthBlock uint64 `json:"eth_block"` + EthBlockLink string `json:"eth_link"` + Ts time.Time `json:"ts"` + Proposer uint64 `json:"proposer"` + ProposerName string `json:"proposer_name"` + Status uint64 `json:"status"` + PayloadStatus uint8 `json:"payload_status"` + BlockRoot []byte `json:"block_root"` } type 
IndexPageDataSlots struct { - Epoch uint64 `json:"epoch"` - Slot uint64 `json:"slot"` - EthBlock uint64 `json:"eth_block"` - Ts time.Time `json:"ts"` - Proposer uint64 `json:"proposer"` - ProposerName string `json:"proposer_name"` - Status uint64 `json:"status"` - BlockRoot []byte `json:"block_root"` - ParentRoot []byte `json:"-"` - ForkGraph []*IndexPageDataForkGraph `json:"fork_graph"` + Epoch uint64 `json:"epoch"` + Slot uint64 `json:"slot"` + EthBlock uint64 `json:"eth_block"` + Ts time.Time `json:"ts"` + Proposer uint64 `json:"proposer"` + ProposerName string `json:"proposer_name"` + Status uint64 `json:"status"` + PayloadStatus uint8 `json:"payload_status"` + BlockRoot []byte `json:"block_root"` + ParentRoot []byte `json:"-"` + ForkGraph []*IndexPageDataForkGraph `json:"fork_graph"` } type IndexPageDataForkGraph struct { diff --git a/types/models/initiated_deposits.go b/types/models/initiated_deposits.go index 5f1c91fc0..8bfb5558f 100644 --- a/types/models/initiated_deposits.go +++ b/types/models/initiated_deposits.go @@ -53,4 +53,5 @@ type InitiatedDepositsPageDataDeposit struct { ValidatorExists bool `json:"validator_exists"` ValidatorIndex uint64 `json:"validator_index"` ValidatorName string `json:"validator_name"` + IsBuilder bool `json:"is_builder"` } diff --git a/types/models/queued_deposits.go b/types/models/queued_deposits.go index c5bfc90ae..351805744 100644 --- a/types/models/queued_deposits.go +++ b/types/models/queued_deposits.go @@ -54,6 +54,7 @@ type QueuedDepositsPageDataDeposit struct { ValidatorExists bool `json:"validator_exists"` ValidatorIndex uint64 `json:"validator_index"` ValidatorName string `json:"validator_name"` + IsBuilder bool `json:"is_builder"` } type QueuedDepositsPageDataDepositTxDetails struct { diff --git a/types/models/slot.go b/types/models/slot.go index 6d67fbc39..d94c6dc4f 100644 --- a/types/models/slot.go +++ b/types/models/slot.go @@ -77,8 +77,12 @@ type SlotPageBlockData struct { DepositRequestsCount uint64 
`json:"deposit_receipts_count"` WithdrawalRequestsCount uint64 `json:"withdrawal_requests_count"` ConsolidationRequestsCount uint64 `json:"consolidation_requests_count"` + BidsCount uint64 `json:"bids_count"` + PtcVotesCount uint64 `json:"ptc_votes_count"` + + PayloadHeader *SlotPagePayloadHeader `json:"payload_header"` + ExecutionData *SlotPageExecutionData `json:"execution_data"` - ExecutionData *SlotPageExecutionData `json:"execution_data"` Attestations []*SlotPageAttestation `json:"attestations"` // Attestations included in this block Deposits []*SlotPageDeposit `json:"deposits"` // Deposits included in this block VoluntaryExits []*SlotPageVoluntaryExit `json:"voluntary_exits"` // Voluntary Exits included in this block @@ -91,6 +95,8 @@ type SlotPageBlockData struct { DepositRequests []*SlotPageDepositRequest `json:"deposit_receipts"` // DepositRequests included in this block WithdrawalRequests []*SlotPageWithdrawalRequest `json:"withdrawal_requests"` // WithdrawalRequests included in this block ConsolidationRequests []*SlotPageConsolidationRequest `json:"consolidation_requests"` // ConsolidationRequests included in this block + Bids []*SlotPageBid `json:"bids"` // Execution payload bids for this block (ePBS) + PtcVotes *SlotPagePtcVotes `json:"ptc_votes"` // PTC votes included in this block (for previous slot) } type SlotPageExecutionData struct { @@ -118,6 +124,20 @@ type SlotPageExecutionData struct { HasExecData bool `json:"has_exec_data"` } +type SlotPagePayloadHeader struct { + PayloadStatus uint16 `json:"payload_status"` + ParentBlockHash []byte `json:"parent_block_hash"` + ParentBlockRoot []byte `json:"parent_block_root"` + BlockHash []byte `json:"block_hash"` + GasLimit uint64 `json:"gas_limit"` + BuilderIndex uint64 `json:"builder_index"` + BuilderName string `json:"builder_name"` + Slot uint64 `json:"slot"` + Value uint64 `json:"value"` + BlobKZGCommitments [][]byte `json:"blob_kzg_commitments"` + Signature []byte `json:"signature"` +} + type 
SlotPageAttestation struct { Slot uint64 `json:"slot"` CommitteeIndex []uint64 `json:"committeeindex"` @@ -147,6 +167,7 @@ type SlotPageDeposit struct { type SlotPageVoluntaryExit struct { ValidatorIndex uint64 `json:"validatorindex"` ValidatorName string `json:"validatorname"` + IsBuilder bool `json:"is_builder"` Epoch uint64 `json:"epoch"` Signature []byte `json:"signature"` } @@ -279,3 +300,40 @@ type SlotPageConsolidationRequest struct { TargetName string `db:"target_name"` Epoch uint64 `db:"epoch"` } + +type SlotPageBid struct { + ParentRoot []byte `json:"parent_root"` + ParentHash []byte `json:"parent_hash"` + BlockHash []byte `json:"block_hash"` + FeeRecipient []byte `json:"fee_recipient"` + GasLimit uint64 `json:"gas_limit"` + BuilderIndex uint64 `json:"builder_index"` + BuilderName string `json:"builder_name"` + IsSelfBuilt bool `json:"is_self_built"` + Slot uint64 `json:"slot"` + Value uint64 `json:"value"` + ElPayment uint64 `json:"el_payment"` + TotalValue uint64 `json:"total_value"` + IsWinning bool `json:"is_winning"` +} + +// SlotPagePtcVotes holds PTC (Payload Timeliness Committee) vote information for a slot. +// These are payload attestations included in this block for the PREVIOUS slot. +type SlotPagePtcVotes struct { + VotedSlot uint64 `json:"voted_slot"` // The slot the votes are for (previous slot) + VotedBlockRoot []byte `json:"voted_block_root"` // The block root being voted on + TotalPtcSize uint64 `json:"total_ptc_size"` // Total PTC committee size + Aggregates []*SlotPagePtcAggregate `json:"aggregates"` // Up to 4 aggregates for different vote flag combinations + PtcCommittee []types.NamedValidator `json:"ptc_committee"` // Full PTC committee with participation status + Participation float64 `json:"participation"` // Overall participation rate +} + +// SlotPagePtcAggregate represents a single PTC vote aggregate for a specific vote flag combination. 
+type SlotPagePtcAggregate struct { + PayloadPresent bool `json:"payload_present"` // Whether the payload was present + BlobDataAvailable bool `json:"blob_data_available"` // Whether blob data was available + AggregationBits []byte `json:"aggregation_bits"` // Bitfield of participating validators + Validators []uint64 `json:"validators"` // Validator indices that voted + Signature []byte `json:"signature"` // Aggregate signature + VoteCount uint64 `json:"vote_count"` // Number of votes in this aggregate +} diff --git a/types/models/slots.go b/types/models/slots.go index 24d2d6ba6..cc56948f7 100644 --- a/types/models/slots.go +++ b/types/models/slots.go @@ -60,6 +60,7 @@ type SlotsPageDataSlot struct { Finalized bool `json:"scheduled"` Scheduled bool `json:"finalized"` Status uint8 `json:"status"` + PayloadStatus uint8 `json:"payload_status"` Synchronized bool `json:"synchronized"` Proposer uint64 `json:"proposer"` ProposerName string `json:"proposer_name"` diff --git a/types/models/slots_filtered.go b/types/models/slots_filtered.go index 786b7729f..4eeb7548e 100644 --- a/types/models/slots_filtered.go +++ b/types/models/slots_filtered.go @@ -80,6 +80,7 @@ type SlotsFilteredPageDataSlot struct { Finalized bool `json:"scheduled"` Scheduled bool `json:"finalized"` Status uint8 `json:"status"` + PayloadStatus uint8 `json:"payload_status"` Synchronized bool `json:"synchronized"` Proposer uint64 `json:"proposer"` ProposerName string `json:"proposer_name"` diff --git a/types/models/validator_slots.go b/types/models/validator_slots.go index daad9d8ca..9f4816268 100644 --- a/types/models/validator_slots.go +++ b/types/models/validator_slots.go @@ -34,6 +34,7 @@ type ValidatorSlotsPageDataSlot struct { Finalized bool `json:"scheduled"` Scheduled bool `json:"finalized"` Status uint8 `json:"status"` + PayloadStatus uint8 `json:"payload_status"` Proposer uint64 `json:"proposer"` ProposerName string `json:"proposer_name"` AttestationCount uint64 `json:"attestation_count"` diff 
--git a/types/models/voluntary_exits.go b/types/models/voluntary_exits.go index cd5ee2a16..2bfc2fae3 100644 --- a/types/models/voluntary_exits.go +++ b/types/models/voluntary_exits.go @@ -41,6 +41,7 @@ type VoluntaryExitsPageDataExit struct { Orphaned bool `json:"orphaned"` ValidatorIndex uint64 `json:"vindex"` ValidatorName string `json:"vname"` + IsBuilder bool `json:"is_builder"` PublicKey []byte `json:"pubkey"` WithdrawalCreds []byte `json:"wdcreds"` ValidatorStatus string `json:"vstatus"` diff --git a/ui-package/src/components/SubmitDepositsForm/DepositGenerator.ts b/ui-package/src/components/SubmitDepositsForm/DepositGenerator.ts index 9e292326f..f440f8f08 100644 --- a/ui-package/src/components/SubmitDepositsForm/DepositGenerator.ts +++ b/ui-package/src/components/SubmitDepositsForm/DepositGenerator.ts @@ -30,7 +30,7 @@ const SigningData = new ContainerType({ domain: new ByteVectorType(32), }); -export type CredentialType = '00' | '01' | '02'; +export type CredentialType = '00' | '01' | '02' | '03'; export interface WithdrawalCredentialConfig { type: CredentialType; @@ -66,10 +66,10 @@ export function validateMnemonicWords(mnemonic: string): boolean { /** * Build withdrawal credentials from type and ETH address - * @param credType - '01' for execution, '02' for compounding + * @param credType - '01' for execution, '02' for compounding, '03' for builder * @param address - 20-byte ETH address (0x prefixed) */ -export function buildWithdrawalCredentialsFromAddress(credType: '01' | '02', address: string): string { +export function buildWithdrawalCredentialsFromAddress(credType: '01' | '02' | '03', address: string): string { const cleanAddress = address.startsWith('0x') ? 
address.slice(2) : address; if (cleanAddress.length !== 40) { throw new Error("Invalid address length"); @@ -113,9 +113,9 @@ export async function buildWithdrawalCredentials( return buildBLSWithdrawalCredentials(withdrawalPubkey); } else { if (!config.address) { - throw new Error("Address required for 0x01/0x02 credentials"); + throw new Error("Address required for 0x01/0x02/0x03 credentials"); } - return buildWithdrawalCredentialsFromAddress(config.type, config.address); + return buildWithdrawalCredentialsFromAddress(config.type as '01' | '02' | '03', config.address); } } diff --git a/ui-package/src/components/SubmitDepositsForm/DepositGeneratorModal.tsx b/ui-package/src/components/SubmitDepositsForm/DepositGeneratorModal.tsx index c71180f85..ad2b073e8 100644 --- a/ui-package/src/components/SubmitDepositsForm/DepositGeneratorModal.tsx +++ b/ui-package/src/components/SubmitDepositsForm/DepositGeneratorModal.tsx @@ -30,8 +30,8 @@ interface IValidatorOverrideState { useCustomAmount: boolean; // Credential override fields credentialInputMode: CredentialInputMode; - credentialType: CredentialType; // '00', '01', '02' - withdrawalAddress: string; // For 0x01/0x02 + credentialType: CredentialType; // '00', '01', '02', '03' + withdrawalAddress: string; // For 0x01/0x02/0x03 rawCredentials: string; // For raw mode useCustomCredentials: boolean; } @@ -403,6 +403,7 @@ const DepositGeneratorModal: React.FC = (props) => +
{credentialType !== '00' && ( @@ -534,8 +535,9 @@ const DepositGeneratorModal: React.FC = (props) => + - {/* Address input (only for 0x01/0x02) */} + {/* Address input (only for 0x01/0x02/0x03) */} {override.credentialType !== '00' && ( %v", index)) } +func FormatBuilder(index uint64, name string) template.HTML { + return formatBuilder(index, name, "fa-hard-hat mr-2", false) +} + +func FormatBuilderWithIndex(index uint64, name string) template.HTML { + return formatBuilder(index, name, "fa-hard-hat mr-2", true) +} + +func formatBuilder(index uint64, name string, icon string, withIndex bool) template.HTML { + if index == math.MaxUint64 { + return template.HTML(fmt.Sprintf(" Self-built", icon)) + } else if name != "" { + var nameLabel string + if withIndex { + nameLabel = fmt.Sprintf("%v (%v)", html.EscapeString(name), index) + } else { + nameLabel = html.EscapeString(name) + } + return template.HTML(fmt.Sprintf(" %v", index, icon, index, nameLabel)) + } + return template.HTML(fmt.Sprintf(" %v", icon, index, index)) +} + func FormatRecentTimeShort(ts time.Time) template.HTML { duration := time.Until(ts) var timeStr string @@ -866,6 +889,8 @@ func formatWithdrawalHash(hash []byte) template.HTML { colorClass = "text-success" } else if hash[0] == 0x02 { colorClass = "text-info" + } else if hash[0] == 0x03 { + colorClass = "text-primary" } else { colorClass = "text-warning" } @@ -878,8 +903,8 @@ func FormatWithdawalCredentials(hash []byte) template.HTML { return "INVALID CREDENTIALS" } - // For 0x01 or 0x02 credentials, link to the address - if hash[0] == 0x01 || hash[0] == 0x02 { + // For 0x01, 0x02 or 0x03 credentials, link to the address + if hash[0] == 0x01 || hash[0] == 0x02 || hash[0] == 0x03 { addr := fmt.Sprintf("0x%x", hash[12:]) // Use local link when execution indexer is enabled diff --git a/utils/templateFucs.go b/utils/templateFucs.go index 96a7518d8..fb5871fdd 100644 --- a/utils/templateFucs.go +++ b/utils/templateFucs.go @@ -121,6 +121,8 @@ func 
GetTemplateFuncs() template.FuncMap { "formatValidatorWithIndex": FormatValidatorWithIndex, "formatValidatorNameWithIndex": FormatValidatorNameWithIndex, "formatSlashedValidator": FormatSlashedValidator, + "formatBuilder": FormatBuilder, + "formatBuilderWithIndex": FormatBuilderWithIndex, "formatWithdawalCredentials": FormatWithdawalCredentials, "formatRecentTimeShort": FormatRecentTimeShort, "formatGraffiti": FormatGraffiti,