Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions beacon_chain/gossip_processing/block_processor.nim
Original file line number Diff line number Diff line change
Expand Up @@ -285,7 +285,7 @@ proc storeBackfillBlock(
# Only store data columns after successfully establishing block viability.
let columns = dataColumnsOpt.valueOr: DataColumnSidecars @[]
debug "Inserting columns into database (backfill)",
indices = columns.mapIt($it[].index)
indices = columns.mapIt($it[].index).len
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

`mapIt` will cause the allocation of an extra seq and some extra processing — if we're just getting the length, this is wasteful.

for i in 0..<columns.len:
self.consensusManager.dag.db.putDataColumnSidecar(columns[i][])

Expand Down Expand Up @@ -757,7 +757,7 @@ proc storeBlock(
# write data columns now that block has been written
let data_columns = dataColumnsOpt.valueOr: DataColumnSidecars @[]
debug "Inserting columns into database",
indices = data_columns.mapIt($it.index)
indices = data_columns.mapIt($it.index).len
for col in data_columns:
self.consensusManager.dag.db.putDataColumnSidecar(col[])

Expand Down
8 changes: 6 additions & 2 deletions beacon_chain/networking/eth2_network.nim
Original file line number Diff line number Diff line change
Expand Up @@ -2511,8 +2511,10 @@ proc lookupCgcFromPeer*(peer: Peer): uint64 =

let metadata = peer.metadata
if metadata.isOk:
return metadata.get.custody_group_count

return (if metadata.get.custody_group_count > NUMBER_OF_COLUMNS:
0'u64
else:
metadata.get.custody_group_count)
# Try getting the custody count from ENR if metadata fetch fails.
debug "Could not get cgc from metadata, trying from ENR",
peer_id = peer.peerId
Expand All @@ -2523,6 +2525,8 @@ proc lookupCgcFromPeer*(peer: Peer): uint64 =
if enrFieldOpt.isOk:
try:
let cgc = SSZ.decode(enrFieldOpt.get, uint8)
if cgc > NUMBER_OF_COLUMNS:
return 0'u64
return cgc.uint64
except SszError, SerializationError:
discard # Ignore decoding errors and fallback to default
Expand Down
19 changes: 12 additions & 7 deletions beacon_chain/nimbus_beacon_node.nim
Original file line number Diff line number Diff line change
Expand Up @@ -1713,7 +1713,7 @@ proc reconstructDataColumns(node: BeaconNode, slot: Slot) =
logScope:
slot = slot

let blck = node.dag.getForkedBlock(node.dag.finalizedHead.blck.bid).valueOr:
let blck = node.dag.getForkedBlock(node.dag.head.bid).valueOr:
warn "Failed to get the current slot head"
return

Expand All @@ -1730,7 +1730,7 @@ proc reconstructDataColumns(node: BeaconNode, slot: Slot) =
if node.dag.db.getDataColumnSidecar(forkyBlck.root, i, colData):
columns.add(newClone(colData))
indices.incl(i)
debug "Stored data columns", indices
debug "Stored data columns", columns = indices.len

# Make sure the node has obtained 50%+ of all the columns
if columns.lenu64 < (maxColCount div 2):
Expand All @@ -1747,6 +1747,7 @@ proc reconstructDataColumns(node: BeaconNode, slot: Slot) =
error "Error in data column reconstruction"
return
let rowCount = recovered.len
var reconCounter = 0
for i in 0 ..< maxColCount:
if i in indices:
continue
Expand All @@ -1758,13 +1759,17 @@ proc reconstructDataColumns(node: BeaconNode, slot: Slot) =
proofs[j] = recovered[j].proofs[i]
let dataColumn = fulu.DataColumnSidecar(
index: ColumnIndex(i),
column: DataColumn(cells),
kzg_proofs: deneb.KzgProofs(proofs),
signed_block_header: forkyBlck.asSigned().toSignedBeaconBlockHeader())
column: DataColumn.init(cells),
kzg_commitments: columns[0].kzg_commitments,
kzg_proofs: deneb.KzgProofs.init(proofs),
signed_block_header: forkyBlck.asSigned().toSignedBeaconBlockHeader(),
kzg_commitments_inclusion_proof:
columns[0].kzg_commitments_inclusion_proof)
node.dag.db.putDataColumnSidecar(dataColumn)
inc reconCounter

debug "Column reconstructed",
len = maxColCount - indices.lenu64
debug "Columns reconstructed",
columns = reconCounter

proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
# Things we do when slot processing has ended and we're about to wait for the
Expand Down
3 changes: 2 additions & 1 deletion beacon_chain/spec/peerdas_helpers.nim
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,8 @@ func handle_custody_groups(cfg: RuntimeConfig, node_id: NodeId,
custody_groups: HashSet[CustodyIndex]
current_id = node_id

while custody_groups.lenu64 < custody_group_count:
let safe_count = min(custody_group_count, cfg.NUMBER_OF_CUSTODY_GROUPS)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/fulu/das-core.md#get_custody_groups defines it as an error to do this:

    assert custody_group_count <= NUMBER_OF_CUSTODY_GROUPS

    # Skip computation if all groups are custodied
    if custody_group_count == NUMBER_OF_CUSTODY_GROUPS:
        return [CustodyIndex(i) for i in range(NUMBER_OF_CUSTODY_GROUPS)]

but returning anything like an Opt or Result would make things more complicated, so there needs to be some default here. Still, it's fine to effectively treat this error handling as clamping.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

we may still need the peer for blocks?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

we may still need the peer for blocks?

That's compatible with not thinking it has any custody at all.

while custody_groups.lenu64 < safe_count:
var hashed_bytes: array[8, byte]

let
Expand Down
Loading