diff --git a/beacon-chain/blockchain/kzg/kzg.go b/beacon-chain/blockchain/kzg/kzg.go index f7831ea59d78..1b1d7cce5004 100644 --- a/beacon-chain/blockchain/kzg/kzg.go +++ b/beacon-chain/blockchain/kzg/kzg.go @@ -34,12 +34,6 @@ type Bytes48 = ckzg4844.Bytes48 // Bytes32 is a 32-byte array. type Bytes32 = ckzg4844.Bytes32 -// CellsAndProofs represents the Cells and Proofs corresponding to a single blob. -type CellsAndProofs struct { - Cells []Cell - Proofs []Proof -} - // BlobToKZGCommitment computes a KZG commitment from a given blob. func BlobToKZGCommitment(blob *Blob) (Commitment, error) { var kzgBlob kzg4844.Blob @@ -78,22 +72,29 @@ func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) { proof, err := kzg4844.ComputeBlobProof(&kzgBlob, kzg4844.Commitment(commitment)) if err != nil { - return [48]byte{}, err + return Proof{}, err } - return Proof(proof), nil + return Proof(proof[:]), nil } // ComputeCellsAndKZGProofs computes the cells and cells KZG proofs from a given blob. -func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) { +func ComputeCellsAndKZGProofs(blob *Blob) ([]Cell, []Proof, error) { var ckzgBlob ckzg4844.Blob copy(ckzgBlob[:], blob[:]) ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(&ckzgBlob) if err != nil { - return CellsAndProofs{}, err + return nil, nil, err + } + + cells := make([]Cell, len(ckzgCells)) + proofs := make([]Proof, len(ckzgProofs)) + for i := range ckzgCells { + cells[i] = Cell(ckzgCells[i]) + proofs[i] = Proof(ckzgProofs[i][:]) } - return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:]) + return cells, proofs, nil } // VerifyCellKZGProofBatch verifies the KZG proofs for a given slice of commitments, cells indices, cells and proofs. @@ -108,39 +109,48 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes) } -// RecoverCellsAndKZGProofs recovers the complete cells and KZG proofs from a given set of cell indices and partial cells. +// RecoverCells recovers the complete cells from a given set of cell indices and partial cells. // Note: `len(cellIndices)` must be equal to `len(partialCells)` and `cellIndices` must be sorted in ascending order. -func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) { +func RecoverCells(cellIndices []uint64, partialCells []Cell) ([]Cell, error) { // Convert `Cell` type to `ckzg4844.Cell` ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells)) for i := range partialCells { ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i]) } - ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells) + ckzgCells, err := ckzg4844.RecoverCells(cellIndices, ckzgPartialCells) if err != nil { - return CellsAndProofs{}, errors.Wrap(err, "recover cells and KZG proofs") + return nil, errors.Wrap(err, "recover cells") + } + + cells := make([]Cell, len(ckzgCells)) + for i := range ckzgCells { + cells[i] = Cell(ckzgCells[i]) } - return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:]) + return cells, nil } -// makeCellsAndProofs converts cells/proofs to the CellsAndProofs type defined in this package. 
-func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) { - if len(ckzgCells) != len(ckzgProofs) { - return CellsAndProofs{}, errors.New("different number of cells/proofs") +// RecoverCellsAndKZGProofs recovers the complete cells and KZG proofs from a given set of cell indices and partial cells. +// Note: `len(cellIndices)` must be equal to `len(partialCells)` and `cellIndices` must be sorted in ascending order. +func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) ([]Cell, []Proof, error) { + // Convert `Cell` type to `ckzg4844.Cell` + ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells)) + for i := range partialCells { + ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i]) } - cells := make([]Cell, 0, len(ckzgCells)) - proofs := make([]Proof, 0, len(ckzgProofs)) + ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells) + if err != nil { + return nil, nil, errors.Wrap(err, "recover cells and KZG proofs") + } + cells := make([]Cell, len(ckzgCells)) + proofs := make([]Proof, len(ckzgProofs)) for i := range ckzgCells { - cells = append(cells, Cell(ckzgCells[i])) - proofs = append(proofs, Proof(ckzgProofs[i])) + cells[i] = Cell(ckzgCells[i]) + proofs[i] = Proof(ckzgProofs[i][:]) } - return CellsAndProofs{ - Cells: cells, - Proofs: proofs, - }, nil + return cells, proofs, nil } diff --git a/beacon-chain/blockchain/kzg/validation_test.go b/beacon-chain/blockchain/kzg/validation_test.go index ddfcb0eb089e..261c81b70c35 100644 --- a/beacon-chain/blockchain/kzg/validation_test.go +++ b/beacon-chain/blockchain/kzg/validation_test.go @@ -203,13 +203,13 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) { require.NoError(t, err) // Compute cells and proofs - cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob) + _, proofs, err := ComputeCellsAndKZGProofs(&blob) require.NoError(t, err) // Create flattened cell proofs (like execution client format) cellProofs := make([][]byte, numberOfColumns) for i := range numberOfColumns { - cellProofs[i] = cellsAndProofs.Proofs[i][:] + cellProofs[i] = proofs[i][:] } blobs := [][]byte{blob[:]} @@ -236,7 +236,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) { require.NoError(t, err) // Compute cells and proofs - cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob) + _, proofs, err := ComputeCellsAndKZGProofs(&blob) require.NoError(t, err) blobs[i] = blob[:] @@ -244,7 +244,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) { // Add cell proofs for this blob for j := range numberOfColumns { - allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:]) + allCellProofs = append(allCellProofs, proofs[j][:]) } } @@ -319,7 +319,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) { randBlob := random.GetRandBlob(123) var blob Blob copy(blob[:], randBlob[:]) - cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob) + _, proofs, err := ComputeCellsAndKZGProofs(&blob) require.NoError(t, err) // Generate wrong commitment from different blob @@ -331,7 +331,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) { cellProofs := make([][]byte, numberOfColumns) for i := range numberOfColumns { - cellProofs[i] = cellsAndProofs.Proofs[i][:] + cellProofs[i] = proofs[i][:] } blobs := [][]byte{blob[:]} diff --git a/beacon-chain/core/peerdas/p2p_interface_test.go b/beacon-chain/core/peerdas/p2p_interface_test.go index 882690af712d..76114ebcceb2 100644 --- a/beacon-chain/core/peerdas/p2p_interface_test.go 
+++ b/beacon-chain/core/peerdas/p2p_interface_test.go @@ -387,10 +387,10 @@ func generateRandomSidecars(t testing.TB, seed, blobCount int64) []blocks.ROData sBlock, err := blocks.NewSignedBeaconBlock(dbBlock) require.NoError(t, err) - cellsAndProofs := util.GenerateCellsAndProofs(t, blobs) + cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs) rob, err := blocks.NewROBlock(sBlock) require.NoError(t, err) - sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob)) + sidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob)) require.NoError(t, err) return sidecars diff --git a/beacon-chain/core/peerdas/reconstruction.go b/beacon-chain/core/peerdas/reconstruction.go index 5a9663bace34..b7d3b848b779 100644 --- a/beacon-chain/core/peerdas/reconstruction.go +++ b/beacon-chain/core/peerdas/reconstruction.go @@ -28,6 +28,56 @@ func MinimumColumnCountToReconstruct() uint64 { return (params.BeaconConfig().NumberOfColumns + 1) / 2 } +// recoverCellsForBlobs reconstructs cells for all blobs from the given data column sidecars. +// When withProofs is true, it returns both cells and proofs. When false, it only returns cells (optimized). +func recoverCellsForBlobs(verifiedRoSidecars []blocks.VerifiedRODataColumn, blobCount int, withProofs bool) ([][]kzg.Cell, [][]kzg.Proof, error) { + sidecarCount := len(verifiedRoSidecars) + var wg errgroup.Group + + cellsPerBlob := make([][]kzg.Cell, blobCount) + var proofsPerBlob [][]kzg.Proof + if withProofs { + proofsPerBlob = make([][]kzg.Proof, blobCount) + } + + for blobIndex := range uint64(blobCount) { + wg.Go(func() error { + cellsIndices := make([]uint64, 0, sidecarCount) + cells := make([]kzg.Cell, 0, sidecarCount) + + for _, sidecar := range verifiedRoSidecars { + cell := sidecar.Column[blobIndex] + cells = append(cells, kzg.Cell(cell)) + cellsIndices = append(cellsIndices, sidecar.Index) + } + + if withProofs { + recoveredCells, recoveredProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells) + if err != nil { + return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", blobIndex) + } + cellsPerBlob[blobIndex] = recoveredCells + proofsPerBlob[blobIndex] = recoveredProofs + } else { + recoveredCells, err := kzg.RecoverCells(cellsIndices, cells) + if err != nil { + return errors.Wrapf(err, "recover cells for blob %d", blobIndex) + } + cellsPerBlob[blobIndex] = recoveredCells + } + return nil + }) + } + + if err := wg.Wait(); err != nil { + if withProofs { + return nil, nil, errors.Wrap(err, "wait for RecoverCellsAndKZGProofs") + } + return nil, nil, errors.Wrap(err, "wait for RecoverCells") + } + return cellsPerBlob, proofsPerBlob, nil +} + // ReconstructDataColumnSidecars reconstructs all the data column sidecars from the given input data column sidecars. // All input sidecars must be committed to the same block. // `inVerifiedRoSidecars` should contain enough sidecars to reconstruct the missing columns, and should not contain any duplicate. @@ -66,38 +116,12 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol }) // Recover cells and compute proofs in parallel. 
- var wg errgroup.Group - cellsAndProofs := make([]kzg.CellsAndProofs, blobCount) - for blobIndex := range uint64(blobCount) { - wg.Go(func() error { - cellsIndices := make([]uint64, 0, sidecarCount) - cells := make([]kzg.Cell, 0, sidecarCount) - - for _, sidecar := range verifiedRoSidecars { - cell := sidecar.Column[blobIndex] - cells = append(cells, kzg.Cell(cell)) - cellsIndices = append(cellsIndices, sidecar.Index) - } - - // Recover the cells and proofs for the corresponding blob - cellsAndProofsForBlob, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells) - - if err != nil { - return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", blobIndex) - } - - // It is safe for multiple goroutines to concurrently write to the same slice, - // as long as they are writing to different indices, which is the case here. - cellsAndProofs[blobIndex] = cellsAndProofsForBlob - return nil - }) - } - - if err := wg.Wait(); err != nil { - return nil, errors.Wrap(err, "wait for RecoverCellsAndKZGProofs") + cellsPerBlob, proofsPerBlob, err := recoverCellsForBlobs(verifiedRoSidecars, blobCount, true) + if err != nil { + return nil, err } - outSidecars, err := DataColumnSidecars(cellsAndProofs, PopulateFromSidecar(referenceSidecar)) + outSidecars, err := DataColumnSidecars(cellsPerBlob, proofsPerBlob, PopulateFromSidecar(referenceSidecar)) if err != nil { return nil, errors.Wrap(err, "data column sidecars from items") } @@ -113,18 +137,10 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol return reconstructedVerifiedRoSidecars, nil } -// ReconstructBlobs constructs verified read only blobs sidecars from verified read only blob sidecars. -// The following constraints must be satisfied: -// - All `dataColumnSidecars` has to be committed to the same block, and -// - `dataColumnSidecars` must be sorted by index and should not contain duplicates. -// - `dataColumnSidecars` must contain either all sidecars corresponding to (non-extended) blobs, -// or either enough sidecars to reconstruct the blobs. -func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) { - // Return early if no blobs are requested. - if len(indices) == 0 { - return nil, nil - } - +// validateAndPrepareDataColumns validates the input data column sidecars and returns the prepared sidecars +// (reconstructed if necessary). This function performs common validation and reconstruction logic used by +// both ReconstructBlobs and ReconstructBlobsData. +func validateAndPrepareDataColumns(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) { if len(verifiedDataColumnSidecars) == 0 { return nil, ErrNotEnoughDataColumnSidecars } @@ -146,6 +162,34 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks. return nil, ErrNotEnoughDataColumnSidecars } + // If all column sidecars corresponding to (non-extended) blobs are present, no need to reconstruct. + if verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1) { + return verifiedDataColumnSidecars, nil + } + + // We need to reconstruct the data column sidecars. + return ReconstructDataColumnSidecars(verifiedDataColumnSidecars) +} + +// ReconstructBlobs constructs verified read only blobs sidecars from verified read only blob sidecars. 
+// The following constraints must be satisfied:
+// - All `dataColumnSidecars` must be committed to the same block, and
+// - `dataColumnSidecars` must be sorted by index and should not contain duplicates.
+// - `dataColumnSidecars` must contain either all sidecars corresponding to (non-extended) blobs,
+// - or enough sidecars to reconstruct the blobs.
+func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
+	// Return early if no blobs are requested.
+	if len(indices) == 0 {
+		return nil, nil
+	}
+
+	// Validate and prepare data columns (reconstruct if necessary).
+	// This also checks if input is empty.
+	preparedDataColumnSidecars, err := validateAndPrepareDataColumns(verifiedDataColumnSidecars)
+	if err != nil {
+		return nil, err
+	}
+
 	// Check if the blob index is too high.
 	commitments, err := block.Block().Body().BlobKzgCommitments()
 	if err != nil {
@@ -159,8 +203,8 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.
 	}
 
 	// Check if the data column sidecars are aligned with the block.
-	dataColumnSidecars := make([]blocks.RODataColumn, 0, len(verifiedDataColumnSidecars))
-	for _, verifiedDataColumnSidecar := range verifiedDataColumnSidecars {
+	dataColumnSidecars := make([]blocks.RODataColumn, 0, len(preparedDataColumnSidecars))
+	for _, verifiedDataColumnSidecar := range preparedDataColumnSidecars {
 		dataColumnSidecar := verifiedDataColumnSidecar.RODataColumn
 		dataColumnSidecars = append(dataColumnSidecars, dataColumnSidecar)
 	}
@@ -169,25 +213,8 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.
 		return nil, errors.Wrap(err, "data columns align with block")
 	}
 
-	// If all column sidecars corresponding to (non-extended) blobs are present, no need to reconstruct.
-	if verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1) {
-		// Convert verified data column sidecars to verified blob sidecars.
-		blobSidecars, err := blobSidecarsFromDataColumnSidecars(block, verifiedDataColumnSidecars, indices)
-		if err != nil {
-			return nil, errors.Wrap(err, "blob sidecars from data column sidecars")
-		}
-
-		return blobSidecars, nil
-	}
-
-	// We need to reconstruct the data column sidecars.
-	reconstructedDataColumnSidecars, err := ReconstructDataColumnSidecars(verifiedDataColumnSidecars)
-	if err != nil {
-		return nil, errors.Wrap(err, "reconstruct data column sidecars")
-	}
-
 	// Convert verified data column sidecars to verified blob sidecars.
-	blobSidecars, err := blobSidecarsFromDataColumnSidecars(block, reconstructedDataColumnSidecars, indices)
+	blobSidecars, err := blobSidecarsFromDataColumnSidecars(block, preparedDataColumnSidecars, indices)
 	if err != nil {
 		return nil, errors.Wrap(err, "blob sidecars from data column sidecars")
 	}
@@ -196,86 +223,191 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.
 }
 
 // ComputeCellsAndProofsFromFlat computes the cells and proofs from blobs and cell flat proofs.
-func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([]kzg.CellsAndProofs, error) { +func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) { numberOfColumns := params.BeaconConfig().NumberOfColumns blobCount := uint64(len(blobs)) cellProofsCount := uint64(len(cellProofs)) cellsCount := blobCount * numberOfColumns if cellsCount != cellProofsCount { - return nil, ErrBlobsCellsProofsMismatch + return nil, nil, ErrBlobsCellsProofsMismatch } - cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount) + cellsPerBlob := make([][]kzg.Cell, 0, blobCount) + proofsPerBlob := make([][]kzg.Proof, 0, blobCount) for i, blob := range blobs { var kzgBlob kzg.Blob if copy(kzgBlob[:], blob) != len(kzgBlob) { - return nil, errors.New("wrong blob size - should never happen") + return nil, nil, errors.New("wrong blob size - should never happen") } // Compute the extended cells from the (non-extended) blob. cells, err := kzg.ComputeCells(&kzgBlob) if err != nil { - return nil, errors.Wrap(err, "compute cells") + return nil, nil, errors.Wrap(err, "compute cells") } var proofs []kzg.Proof for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ { var kzgProof kzg.Proof if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) { - return nil, errors.New("wrong KZG proof size - should never happen") + return nil, nil, errors.New("wrong KZG proof size - should never happen") } proofs = append(proofs, kzgProof) } - cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: proofs} - cellsAndProofs = append(cellsAndProofs, cellsProofs) + cellsPerBlob = append(cellsPerBlob, cells) + proofsPerBlob = append(proofsPerBlob, proofs) } - return cellsAndProofs, nil + return cellsPerBlob, proofsPerBlob, nil } // ComputeCellsAndProofs computes the cells and proofs from blobs and cell proofs. -func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([]kzg.CellsAndProofs, error) { +func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) { numberOfColumns := params.BeaconConfig().NumberOfColumns - cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobsAndProofs)) + cellsPerBlob := make([][]kzg.Cell, 0, len(blobsAndProofs)) + proofsPerBlob := make([][]kzg.Proof, 0, len(blobsAndProofs)) for _, blobAndProof := range blobsAndProofs { if blobAndProof == nil { - return nil, ErrNilBlobAndProof + return nil, nil, ErrNilBlobAndProof } var kzgBlob kzg.Blob if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) { - return nil, errors.New("wrong blob size - should never happen") + return nil, nil, errors.New("wrong blob size - should never happen") } // Compute the extended cells from the (non-extended) blob. 
cells, err := kzg.ComputeCells(&kzgBlob) if err != nil { - return nil, errors.Wrap(err, "compute cells") + return nil, nil, errors.Wrap(err, "compute cells") } kzgProofs := make([]kzg.Proof, 0, numberOfColumns) for _, kzgProofBytes := range blobAndProof.KzgProofs { if len(kzgProofBytes) != kzg.BytesPerProof { - return nil, errors.New("wrong KZG proof size - should never happen") + return nil, nil, errors.New("wrong KZG proof size - should never happen") } var kzgProof kzg.Proof if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) { - return nil, errors.New("wrong copied KZG proof size - should never happen") + return nil, nil, errors.New("wrong copied KZG proof size - should never happen") } kzgProofs = append(kzgProofs, kzgProof) } - cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: kzgProofs} - cellsAndProofs = append(cellsAndProofs, cellsProofs) + cellsPerBlob = append(cellsPerBlob, cells) + proofsPerBlob = append(proofsPerBlob, kzgProofs) + } + + return cellsPerBlob, proofsPerBlob, nil +} + +// ReconstructBlobsData reconstructs blob data from data column sidecars without computing KZG proofs or creating sidecars. +// This is an optimized version for when only the blob data is needed (e.g., for the GetBlobs endpoint). +// The following constraints must be satisfied: +// - All `dataColumnSidecars` must be committed to the same block, and +// - `dataColumnSidecars` must be sorted by index and should not contain duplicates. +// - `dataColumnSidecars` must contain either all sidecars corresponding to (non-extended) blobs, +// - or enough sidecars to reconstruct the blobs. +func ReconstructBlobsData(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn, indices []int, blobCount int) ([][]byte, error) { + // If no specific indices are requested, populate with all blob indices. + if len(indices) == 0 { + indices = make([]int, blobCount) + for i := range indices { + indices[i] = i + } + } + + if len(verifiedDataColumnSidecars) == 0 { + return nil, ErrNotEnoughDataColumnSidecars + } + + // Check if the sidecars are sorted by index and do not contain duplicates. + previousColumnIndex := verifiedDataColumnSidecars[0].Index + for _, dataColumnSidecar := range verifiedDataColumnSidecars[1:] { + columnIndex := dataColumnSidecar.Index + if columnIndex <= previousColumnIndex { + return nil, ErrDataColumnSidecarsNotSortedByIndex + } + + previousColumnIndex = columnIndex + } + + // Check if we have enough columns. + cellsPerBlob := fieldparams.CellsPerBlob + if len(verifiedDataColumnSidecars) < cellsPerBlob { + return nil, ErrNotEnoughDataColumnSidecars + } + + // Verify that the actual blob count from the first sidecar matches the expected count + referenceSidecar := verifiedDataColumnSidecars[0] + actualBlobCount := len(referenceSidecar.Column) + if actualBlobCount != blobCount { + return nil, errors.Errorf("blob count mismatch: expected %d, got %d", blobCount, actualBlobCount) + } + + // Check if the blob index is too high. + for _, blobIndex := range indices { + if blobIndex >= blobCount { + return nil, ErrBlobIndexTooHigh + } + } + + // Check if all columns have the same length and are committed to the same block. + blockRoot := referenceSidecar.BlockRoot() + for _, sidecar := range verifiedDataColumnSidecars[1:] { + if len(sidecar.Column) != blobCount { + return nil, ErrColumnLengthsDiffer + } + + if sidecar.BlockRoot() != blockRoot { + return nil, ErrBlockRootMismatch + } + } + + // Check if we have all non-extended columns (0..63) - if so, no reconstruction needed. 
+ hasAllNonExtendedColumns := verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1) + + var reconstructedCells [][]kzg.Cell + if !hasAllNonExtendedColumns { + // Need to reconstruct cells (but NOT proofs) for each blob. + var err error + reconstructedCells, _, err = recoverCellsForBlobs(verifiedDataColumnSidecars, blobCount, false) + if err != nil { + return nil, err + } + } + + // Extract blob data without computing proofs. + blobs := make([][]byte, 0, len(indices)) + for _, blobIndex := range indices { + var blob kzg.Blob + + // Compute the content of the blob. + for columnIndex := range cellsPerBlob { + var cell []byte + if hasAllNonExtendedColumns { + // Use existing cells from sidecars + cell = verifiedDataColumnSidecars[columnIndex].Column[blobIndex] + } else { + // Use reconstructed cells + cell = reconstructedCells[blobIndex][columnIndex][:] + } + + if copy(blob[kzg.BytesPerCell*columnIndex:], cell) != kzg.BytesPerCell { + return nil, errors.New("wrong cell size - should never happen") + } + } + + blobs = append(blobs, blob[:]) } - return cellsAndProofs, nil + return blobs, nil } // blobSidecarsFromDataColumnSidecars converts verified data column sidecars to verified blob sidecars. diff --git a/beacon-chain/core/peerdas/reconstruction_test.go b/beacon-chain/core/peerdas/reconstruction_test.go index 88fe79d1786c..c949f2eae342 100644 --- a/beacon-chain/core/peerdas/reconstruction_test.go +++ b/beacon-chain/core/peerdas/reconstruction_test.go @@ -207,7 +207,8 @@ func TestReconstructBlobs(t *testing.T) { // Compute cells and proofs from blob sidecars. var wg errgroup.Group blobs := make([][]byte, blobCount) - inputCellsAndProofs := make([]kzg.CellsAndProofs, blobCount) + inputCellsPerBlob := make([][]kzg.Cell, blobCount) + inputProofsPerBlob := make([][]kzg.Proof, blobCount) for i := range blobCount { blob := roBlobSidecars[i].Blob blobs[i] = blob @@ -217,14 +218,15 @@ func TestReconstructBlobs(t *testing.T) { count := copy(kzgBlob[:], blob) require.Equal(t, len(kzgBlob), count) - cp, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob) + cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob) if err != nil { return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i) } // It is safe for multiple goroutines to concurrently write to the same slice, // as long as they are writing to different indices, which is the case here. - inputCellsAndProofs[i] = cp + inputCellsPerBlob[i] = cells + inputProofsPerBlob[i] = proofs return nil }) @@ -235,18 +237,18 @@ func TestReconstructBlobs(t *testing.T) { // Flatten proofs. cellProofs := make([][]byte, 0, blobCount*numberOfColumns) - for _, cp := range inputCellsAndProofs { - for _, proof := range cp.Proofs { + for _, proofs := range inputProofsPerBlob { + for _, proof := range proofs { cellProofs = append(cellProofs, proof[:]) } } // Compute celles and proofs from the blobs and cell proofs. - cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs) + cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs) require.NoError(t, err) // Construct data column sidears from the signed block and cells and proofs. - roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(roBlock)) + roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock)) require.NoError(t, err) // Convert to verified data column sidecars. 
@@ -296,6 +298,253 @@ func TestReconstructBlobs(t *testing.T) { } +func TestReconstructBlobsData(t *testing.T) { + params.SetupTestConfigCleanup(t) + params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2 + + require.NoError(t, kzg.Start()) + fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch) + + t.Run("empty indices with blobCount > 0", func(t *testing.T) { + const blobCount = 3 + _, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount) + + // Generate data column sidecars + blobs := make([]kzg.Blob, blobCount) + for i := range blobCount { + copy(blobs[i][:], roBlobSidecars[i].Blob) + } + + cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs) + roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(fs)) + roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock)) + require.NoError(t, err) + + verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars)) + for _, roDataColumnSidecar := range roDataColumnSidecars { + verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar) + verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar) + } + + // Call with empty indices - should return all blobs + reconstructedBlobs, err := peerdas.ReconstructBlobsData(verifiedRoSidecars, []int{}, blobCount) + require.NoError(t, err) + require.Equal(t, blobCount, len(reconstructedBlobs)) + + // Verify each blob matches + for i := 0; i < blobCount; i++ { + require.DeepEqual(t, blobs[i][:], reconstructedBlobs[i]) + } + }) + + t.Run("specific indices", func(t *testing.T) { + const blobCount = 3 + _, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount) + + blobs := make([]kzg.Blob, blobCount) + for i := range blobCount { + copy(blobs[i][:], roBlobSidecars[i].Blob) + } + + cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs) + roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(fs)) + roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock)) + require.NoError(t, err) + + verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars)) + for _, roDataColumnSidecar := range roDataColumnSidecars { + verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar) + verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar) + } + + // Request only blobs at indices 0 and 2 + indices := []int{0, 2} + reconstructedBlobs, err := peerdas.ReconstructBlobsData(verifiedRoSidecars, indices, blobCount) + require.NoError(t, err) + require.Equal(t, len(indices), len(reconstructedBlobs)) + + // Verify requested blobs match + for i, blobIndex := range indices { + require.DeepEqual(t, blobs[blobIndex][:], reconstructedBlobs[i]) + } + }) + + t.Run("blob count mismatch", func(t *testing.T) { + const blobCount = 3 + _, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount) + + blobs := make([]kzg.Blob, blobCount) + for i := range blobCount { + copy(blobs[i][:], roBlobSidecars[i].Blob) + } + + cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs) + roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(fs)) + roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, 
proofsPerBlob, peerdas.PopulateFromBlock(roBlock)) + require.NoError(t, err) + + verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars)) + for _, roDataColumnSidecar := range roDataColumnSidecars { + verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar) + verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar) + } + + // Pass wrong blob count + wrongBlobCount := 5 + _, err = peerdas.ReconstructBlobsData(verifiedRoSidecars, []int{0}, wrongBlobCount) + require.ErrorContains(t, "blob count mismatch", err) + }) + + t.Run("empty data columns", func(t *testing.T) { + _, err := peerdas.ReconstructBlobsData([]blocks.VerifiedRODataColumn{}, []int{0}, 1) + require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars) + }) + + t.Run("index too high", func(t *testing.T) { + const blobCount = 3 + _, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount) + + blobs := make([]kzg.Blob, blobCount) + for i := range blobCount { + copy(blobs[i][:], roBlobSidecars[i].Blob) + } + + cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs) + roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(fs)) + roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock)) + require.NoError(t, err) + + verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars)) + for _, roDataColumnSidecar := range roDataColumnSidecars { + verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar) + verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar) + } + + // Request blob index that's too high + _, err = peerdas.ReconstructBlobsData(verifiedRoSidecars, []int{blobCount}, blobCount) + require.ErrorIs(t, err, peerdas.ErrBlobIndexTooHigh) + }) + + t.Run("not enough columns", func(t *testing.T) { + const blobCount = 3 + _, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount) + + blobs := make([]kzg.Blob, blobCount) + for i := range blobCount { + copy(blobs[i][:], roBlobSidecars[i].Blob) + } + + cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs) + roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(fs)) + roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock)) + require.NoError(t, err) + + verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars)) + for _, roDataColumnSidecar := range roDataColumnSidecars { + verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar) + verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar) + } + + // Only provide 63 columns (need at least 64) + inputSidecars := verifiedRoSidecars[:fieldparams.CellsPerBlob-1] + _, err = peerdas.ReconstructBlobsData(inputSidecars, []int{0}, blobCount) + require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars) + }) + + t.Run("not sorted", func(t *testing.T) { + const blobCount = 3 + _, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount) + + blobs := make([]kzg.Blob, blobCount) + for i := range blobCount { + copy(blobs[i][:], roBlobSidecars[i].Blob) + } + + cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs) + roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(fs)) + 
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock)) + require.NoError(t, err) + + verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars)) + for _, roDataColumnSidecar := range roDataColumnSidecars { + verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar) + verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar) + } + + // Swap two sidecars to make them unsorted + verifiedRoSidecars[3], verifiedRoSidecars[2] = verifiedRoSidecars[2], verifiedRoSidecars[3] + + _, err = peerdas.ReconstructBlobsData(verifiedRoSidecars, []int{0}, blobCount) + require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex) + }) + + t.Run("with reconstruction needed", func(t *testing.T) { + const blobCount = 3 + _, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount) + + blobs := make([]kzg.Blob, blobCount) + for i := range blobCount { + copy(blobs[i][:], roBlobSidecars[i].Blob) + } + + cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs) + roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(fs)) + roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock)) + require.NoError(t, err) + + verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars)) + for _, roDataColumnSidecar := range roDataColumnSidecars { + verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar) + verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar) + } + + // Keep only even-indexed columns (will need reconstruction) + filteredSidecars := make([]blocks.VerifiedRODataColumn, 0, len(verifiedRoSidecars)/2) + for i := 0; i < len(verifiedRoSidecars); i += 2 { + filteredSidecars = append(filteredSidecars, verifiedRoSidecars[i]) + } + + // Reconstruct all blobs + reconstructedBlobs, err := peerdas.ReconstructBlobsData(filteredSidecars, []int{}, blobCount) + require.NoError(t, err) + require.Equal(t, blobCount, len(reconstructedBlobs)) + + // Verify all blobs match + for i := range blobCount { + require.DeepEqual(t, blobs[i][:], reconstructedBlobs[i]) + } + }) + + t.Run("no reconstruction needed - all non-extended columns present", func(t *testing.T) { + const blobCount = 3 + _, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount) + + blobs := make([]kzg.Blob, blobCount) + for i := range blobCount { + copy(blobs[i][:], roBlobSidecars[i].Blob) + } + + cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs) + roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(fs)) + roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock)) + require.NoError(t, err) + + verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars)) + for _, roDataColumnSidecar := range roDataColumnSidecars { + verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar) + verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar) + } + + // Use all columns (no reconstruction needed since we have all non-extended columns 0-63) + reconstructedBlobs, err := peerdas.ReconstructBlobsData(verifiedRoSidecars, []int{1}, blobCount) + require.NoError(t, err) + require.Equal(t, 1, len(reconstructedBlobs)) + + // Verify blob matches + 
require.DeepEqual(t, blobs[1][:], reconstructedBlobs[0]) + }) +} + func TestComputeCellsAndProofsFromFlat(t *testing.T) { // Start the trusted setup. err := kzg.Start() @@ -310,7 +559,7 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) { // Create proofs for 2 blobs worth of columns cellProofs := make([][]byte, 2*numberOfColumns) - _, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs) + _, _, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs) require.ErrorIs(t, err, peerdas.ErrBlobsCellsProofsMismatch) }) @@ -323,7 +572,8 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) { // Extract blobs and compute expected cells and proofs blobs := make([][]byte, blobCount) - expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount) + expectedCellsPerBlob := make([][]kzg.Cell, blobCount) + expectedProofsPerBlob := make([][]kzg.Proof, blobCount) var wg errgroup.Group for i := range blobCount { @@ -335,12 +585,13 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) { count := copy(kzgBlob[:], blob) require.Equal(t, len(kzgBlob), count) - cp, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob) + cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob) if err != nil { return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i) } - expectedCellsAndProofs[i] = cp + expectedCellsPerBlob[i] = cells + expectedProofsPerBlob[i] = proofs return nil }) } @@ -350,30 +601,30 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) { // Flatten proofs cellProofs := make([][]byte, 0, blobCount*numberOfColumns) - for _, cp := range expectedCellsAndProofs { - for _, proof := range cp.Proofs { + for _, proofs := range expectedProofsPerBlob { + for _, proof := range proofs { cellProofs = append(cellProofs, proof[:]) } } // Test ComputeCellsAndProofs - actualCellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs) + actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs) require.NoError(t, err) - require.Equal(t, blobCount, len(actualCellsAndProofs)) + require.Equal(t, blobCount, len(actualCellsPerBlob)) // Verify the results match expected for i := range blobCount { - require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells)) - require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs)) + require.Equal(t, len(expectedCellsPerBlob[i]), len(actualCellsPerBlob[i])) + require.Equal(t, len(expectedProofsPerBlob[i]), len(actualProofsPerBlob[i])) // Compare cells - for j, expectedCell := range expectedCellsAndProofs[i].Cells { - require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j]) + for j, expectedCell := range expectedCellsPerBlob[i] { + require.Equal(t, expectedCell, actualCellsPerBlob[i][j]) } // Compare proofs - for j, expectedProof := range expectedCellsAndProofs[i].Proofs { - require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j]) + for j, expectedProof := range expectedProofsPerBlob[i] { + require.Equal(t, expectedProof, actualProofsPerBlob[i][j]) } } }) @@ -381,7 +632,7 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) { func TestComputeCellsAndProofsFromStructured(t *testing.T) { t.Run("nil blob and proof", func(t *testing.T) { - _, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil}) + _, _, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil}) require.ErrorIs(t, err, peerdas.ErrNilBlobAndProof) }) @@ -397,7 +648,8 @@ func 
TestComputeCellsAndProofsFromStructured(t *testing.T) { // Extract blobs and compute expected cells and proofs blobsAndProofs := make([]*pb.BlobAndProofV2, blobCount) - expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount) + expectedCellsPerBlob := make([][]kzg.Cell, blobCount) + expectedProofsPerBlob := make([][]kzg.Proof, blobCount) var wg errgroup.Group for i := range blobCount { @@ -408,14 +660,15 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) { count := copy(kzgBlob[:], blob) require.Equal(t, len(kzgBlob), count) - cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob) + cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob) if err != nil { return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i) } - expectedCellsAndProofs[i] = cellsAndProofs + expectedCellsPerBlob[i] = cells + expectedProofsPerBlob[i] = proofs - kzgProofs := make([][]byte, 0, len(cellsAndProofs.Proofs)) - for _, proof := range cellsAndProofs.Proofs { + kzgProofs := make([][]byte, 0, len(proofs)) + for _, proof := range proofs { kzgProofs = append(kzgProofs, proof[:]) } @@ -433,24 +686,24 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) { require.NoError(t, err) // Test ComputeCellsAndProofs - actualCellsAndProofs, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs) + actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs) require.NoError(t, err) - require.Equal(t, blobCount, len(actualCellsAndProofs)) + require.Equal(t, blobCount, len(actualCellsPerBlob)) // Verify the results match expected for i := range blobCount { - require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells)) - require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs)) - require.Equal(t, len(expectedCellsAndProofs[i].Proofs), cap(actualCellsAndProofs[i].Proofs)) + require.Equal(t, len(expectedCellsPerBlob[i]), len(actualCellsPerBlob[i])) + require.Equal(t, len(expectedProofsPerBlob[i]), len(actualProofsPerBlob[i])) + require.Equal(t, len(expectedProofsPerBlob[i]), cap(actualProofsPerBlob[i])) // Compare cells - for j, expectedCell := range expectedCellsAndProofs[i].Cells { - require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j]) + for j, expectedCell := range expectedCellsPerBlob[i] { + require.Equal(t, expectedCell, actualCellsPerBlob[i][j]) } // Compare proofs - for j, expectedProof := range expectedCellsAndProofs[i].Proofs { - require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j]) + for j, expectedProof := range expectedProofsPerBlob[i] { + require.Equal(t, expectedProof, actualProofsPerBlob[i][j]) } } }) diff --git a/beacon-chain/core/peerdas/validator.go b/beacon-chain/core/peerdas/validator.go index 65575aef4626..892983293519 100644 --- a/beacon-chain/core/peerdas/validator.go +++ b/beacon-chain/core/peerdas/validator.go @@ -95,17 +95,21 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat // DataColumnSidecars, given ConstructionPopulator and the cells/proofs associated with each blob in the // block, assembles sidecars which can be distributed to peers. +// cellsPerBlob and proofsPerBlob are parallel slices where each index represents a blob. 
// This is an adapted version of // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars, // which is designed to be used both when constructing sidecars from a block and from a sidecar, replacing // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block and // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_column_sidecar -func DataColumnSidecars(rows []kzg.CellsAndProofs, src ConstructionPopulator) ([]blocks.RODataColumn, error) { - if len(rows) == 0 { +func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, src ConstructionPopulator) ([]blocks.RODataColumn, error) { + if len(cellsPerBlob) == 0 { return nil, nil } + if len(cellsPerBlob) != len(proofsPerBlob) { + return nil, errors.New("cells and proofs length mismatch") + } start := time.Now() - cells, proofs, err := rotateRowsToCols(rows, params.BeaconConfig().NumberOfColumns) + cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, params.BeaconConfig().NumberOfColumns) if err != nil { return nil, errors.Wrap(err, "rotate cells and proofs") } @@ -197,26 +201,28 @@ func (b *BlockReconstructionSource) extract() (*blockInfo, error) { // rotateRowsToCols takes a 2D slice of cells and proofs, where the x is rows (blobs) and y is columns, // and returns a 2D slice where x is columns and y is rows. -func rotateRowsToCols(rows []kzg.CellsAndProofs, numCols uint64) ([][][]byte, [][][]byte, error) { - if len(rows) == 0 { +func rotateRowsToCols(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, numCols uint64) ([][][]byte, [][][]byte, error) { + if len(cellsPerBlob) == 0 { return nil, nil, nil } cellCols := make([][][]byte, numCols) proofCols := make([][][]byte, numCols) - for i, cp := range rows { - if uint64(len(cp.Cells)) != numCols { + for i := range cellsPerBlob { + cells := cellsPerBlob[i] + proofs := proofsPerBlob[i] + if uint64(len(cells)) != numCols { return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough cells") } - if len(cp.Cells) != len(cp.Proofs) { + if len(cells) != len(proofs) { return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough proofs") } for j := uint64(0); j < numCols; j++ { if i == 0 { - cellCols[j] = make([][]byte, len(rows)) - proofCols[j] = make([][]byte, len(rows)) + cellCols[j] = make([][]byte, len(cellsPerBlob)) + proofCols[j] = make([][]byte, len(cellsPerBlob)) } - cellCols[j][i] = cp.Cells[j][:] - proofCols[j][i] = cp.Proofs[j][:] + cellCols[j][i] = cells[j][:] + proofCols[j][i] = proofs[j][:] } } return cellCols, proofCols, nil diff --git a/beacon-chain/core/peerdas/validator_test.go b/beacon-chain/core/peerdas/validator_test.go index e7923747af08..ea3de4d399b9 100644 --- a/beacon-chain/core/peerdas/validator_test.go +++ b/beacon-chain/core/peerdas/validator_test.go @@ -68,16 +68,16 @@ func TestDataColumnSidecars(t *testing.T) { require.NoError(t, err) // Create cells and proofs. 
- cellsAndProofs := []kzg.CellsAndProofs{ - { - Cells: make([]kzg.Cell, params.BeaconConfig().NumberOfColumns), - Proofs: make([]kzg.Proof, params.BeaconConfig().NumberOfColumns), - }, + cellsPerBlob := [][]kzg.Cell{ + make([]kzg.Cell, params.BeaconConfig().NumberOfColumns), + } + proofsPerBlob := [][]kzg.Proof{ + make([]kzg.Proof, params.BeaconConfig().NumberOfColumns), } rob, err := blocks.NewROBlock(signedBeaconBlock) require.NoError(t, err) - _, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob)) + _, err = peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob)) require.ErrorIs(t, err, peerdas.ErrSizeMismatch) }) @@ -92,18 +92,18 @@ func TestDataColumnSidecars(t *testing.T) { // Create cells and proofs with insufficient cells for the number of columns. // This simulates a scenario where cellsAndProofs has fewer cells than expected columns. - cellsAndProofs := []kzg.CellsAndProofs{ - { - Cells: make([]kzg.Cell, 10), // Only 10 cells - Proofs: make([]kzg.Proof, 10), // Only 10 proofs - }, + cellsPerBlob := [][]kzg.Cell{ + make([]kzg.Cell, 10), // Only 10 cells + } + proofsPerBlob := [][]kzg.Proof{ + make([]kzg.Proof, 10), // Only 10 proofs } // This should fail because the function will try to access columns up to NumberOfColumns // but we only have 10 cells/proofs. rob, err := blocks.NewROBlock(signedBeaconBlock) require.NoError(t, err) - _, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob)) + _, err = peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob)) require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars) }) @@ -118,17 +118,17 @@ func TestDataColumnSidecars(t *testing.T) { // Create cells and proofs with sufficient cells but insufficient proofs. numberOfColumns := params.BeaconConfig().NumberOfColumns - cellsAndProofs := []kzg.CellsAndProofs{ - { - Cells: make([]kzg.Cell, numberOfColumns), - Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns - }, + cellsPerBlob := [][]kzg.Cell{ + make([]kzg.Cell, numberOfColumns), + } + proofsPerBlob := [][]kzg.Proof{ + make([]kzg.Proof, 5), // Only 5 proofs, less than columns } // This should fail when trying to access proof beyond index 4. rob, err := blocks.NewROBlock(signedBeaconBlock) require.NoError(t, err) - _, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob)) + _, err = peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob)) require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars) require.ErrorContains(t, "not enough proofs", err) }) @@ -150,28 +150,26 @@ func TestDataColumnSidecars(t *testing.T) { // Create cells and proofs with correct dimensions. 
numberOfColumns := params.BeaconConfig().NumberOfColumns - cellsAndProofs := []kzg.CellsAndProofs{ - { - Cells: make([]kzg.Cell, numberOfColumns), - Proofs: make([]kzg.Proof, numberOfColumns), - }, - { - Cells: make([]kzg.Cell, numberOfColumns), - Proofs: make([]kzg.Proof, numberOfColumns), - }, + cellsPerBlob := [][]kzg.Cell{ + make([]kzg.Cell, numberOfColumns), + make([]kzg.Cell, numberOfColumns), + } + proofsPerBlob := [][]kzg.Proof{ + make([]kzg.Proof, numberOfColumns), + make([]kzg.Proof, numberOfColumns), } // Set distinct values in cells and proofs for testing for i := range numberOfColumns { - cellsAndProofs[0].Cells[i][0] = byte(i) - cellsAndProofs[0].Proofs[i][0] = byte(i) - cellsAndProofs[1].Cells[i][0] = byte(i + 128) - cellsAndProofs[1].Proofs[i][0] = byte(i + 128) + cellsPerBlob[0][i][0] = byte(i) + proofsPerBlob[0][i][0] = byte(i) + cellsPerBlob[1][i][0] = byte(i + 128) + proofsPerBlob[1][i][0] = byte(i + 128) } rob, err := blocks.NewROBlock(signedBeaconBlock) require.NoError(t, err) - sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob)) + sidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob)) require.NoError(t, err) require.NotNil(t, sidecars) require.Equal(t, int(numberOfColumns), len(sidecars)) @@ -215,28 +213,26 @@ func TestReconstructionSource(t *testing.T) { // Create cells and proofs with correct dimensions. numberOfColumns := params.BeaconConfig().NumberOfColumns - cellsAndProofs := []kzg.CellsAndProofs{ - { - Cells: make([]kzg.Cell, numberOfColumns), - Proofs: make([]kzg.Proof, numberOfColumns), - }, - { - Cells: make([]kzg.Cell, numberOfColumns), - Proofs: make([]kzg.Proof, numberOfColumns), - }, + cellsPerBlob := [][]kzg.Cell{ + make([]kzg.Cell, numberOfColumns), + make([]kzg.Cell, numberOfColumns), + } + proofsPerBlob := [][]kzg.Proof{ + make([]kzg.Proof, numberOfColumns), + make([]kzg.Proof, numberOfColumns), } // Set distinct values in cells and proofs for testing for i := range numberOfColumns { - cellsAndProofs[0].Cells[i][0] = byte(i) - cellsAndProofs[0].Proofs[i][0] = byte(i) - cellsAndProofs[1].Cells[i][0] = byte(i + 128) - cellsAndProofs[1].Proofs[i][0] = byte(i + 128) + cellsPerBlob[0][i][0] = byte(i) + proofsPerBlob[0][i][0] = byte(i) + cellsPerBlob[1][i][0] = byte(i + 128) + proofsPerBlob[1][i][0] = byte(i + 128) } rob, err := blocks.NewROBlock(signedBeaconBlock) require.NoError(t, err) - sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob)) + sidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob)) require.NoError(t, err) require.NotNil(t, sidecars) require.Equal(t, int(numberOfColumns), len(sidecars)) diff --git a/beacon-chain/execution/engine_client.go b/beacon-chain/execution/engine_client.go index c8bbcd997a80..93a2f4a1dcdd 100644 --- a/beacon-chain/execution/engine_client.go +++ b/beacon-chain/execution/engine_client.go @@ -660,18 +660,18 @@ func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator pee return nil, wrapWithBlockRoot(err, root, "commitments") } - cellsAndProofs, err := s.fetchCellsAndProofsFromExecution(ctx, commitments) + cellsPerBlob, proofsPerBlob, err := s.fetchCellsAndProofsFromExecution(ctx, commitments) if err != nil { return nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client") } // Return early if nothing is returned from the EL. 
- if len(cellsAndProofs) == 0 { + if len(cellsPerBlob) == 0 { return nil, nil } // Construct data column sidears from the signed block and cells and proofs. - roSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, populator) + roSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, populator) if err != nil { return nil, wrapWithBlockRoot(err, populator.Root(), "data column sidcars from column sidecar") } @@ -684,7 +684,7 @@ func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator pee } // fetchCellsAndProofsFromExecution fetches cells and proofs from the execution client (using engine_getBlobsV2 execution API method) -func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) ([]kzg.CellsAndProofs, error) { +func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) { // Collect KZG hashes for all blobs. versionedHashes := make([]common.Hash, 0, len(kzgCommitments)) for _, commitment := range kzgCommitments { @@ -695,21 +695,21 @@ func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommi // Fetch all blobsAndCellsProofs from the execution client. blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes) if err != nil { - return nil, errors.Wrapf(err, "get blobs V2") + return nil, nil, errors.Wrapf(err, "get blobs V2") } // Return early if nothing is returned from the EL. if len(blobAndProofV2s) == 0 { - return nil, nil + return nil, nil, nil } // Compute cells and proofs from the blobs and cell proofs. - cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromStructured(blobAndProofV2s) + cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobAndProofV2s) if err != nil { - return nil, errors.Wrap(err, "compute cells and proofs") + return nil, nil, errors.Wrap(err, "compute cells and proofs") } - return cellsAndProofs, nil + return cellsPerBlob, proofsPerBlob, nil } // upgradeSidecarsToVerifiedSidecars upgrades a list of data column sidecars into verified data column sidecars. 
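Reviewer note: for readers following the API change across these files, here is a minimal, self-contained sketch of how a caller consumes the reworked `kzg` surface after this diff. `ComputeCellsAndKZGProofs` now returns two parallel slices instead of a `CellsAndProofs` struct, and per-blob proofs are flattened into a single list the same way the updated tests and the execution-client cell-proof format do. The import path, the `main` wrapper, and the zero-valued blobs are illustrative assumptions, not part of this change.

```go
package main

import (
	"fmt"
	"log"

	// Assumed module path; adjust to the repository's actual module name.
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
)

func main() {
	// Load the trusted setup once before any KZG operation.
	if err := kzg.Start(); err != nil {
		log.Fatal(err)
	}

	// Two zero-valued blobs are valid inputs and keep the sketch self-contained.
	blobs := make([]kzg.Blob, 2)

	// Flatten the per-blob cell proofs into one list, mirroring the
	// execution-client format used by the updated tests.
	var cellProofs [][]byte
	for i := range blobs {
		cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&blobs[i])
		if err != nil {
			log.Fatal(err)
		}
		// The two return values are parallel slices: proofs[j] proves cells[j].
		if len(cells) != len(proofs) {
			log.Fatalf("expected parallel slices, got %d cells and %d proofs", len(cells), len(proofs))
		}
		for j := range proofs {
			cellProofs = append(cellProofs, proofs[j][:])
		}
	}
	fmt.Printf("flattened %d cell proofs for %d blobs\n", len(cellProofs), len(blobs))
}
```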
diff --git a/beacon-chain/rpc/eth/beacon/handlers_test.go b/beacon-chain/rpc/eth/beacon/handlers_test.go index d39830dec98a..0f790b2ffad0 100644 --- a/beacon-chain/rpc/eth/beacon/handlers_test.go +++ b/beacon-chain/rpc/eth/beacon/handlers_test.go @@ -5018,12 +5018,12 @@ func Test_validateBlobs(t *testing.T) { numberOfColumns := params.BeaconConfig().NumberOfColumns cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns) for blobIdx := 0; blobIdx < blobCount; blobIdx++ { - cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlobs[blobIdx]) + _, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlobs[blobIdx]) require.NoError(t, err) for colIdx := uint64(0); colIdx < numberOfColumns; colIdx++ { cellProofIdx := uint64(blobIdx)*numberOfColumns + colIdx - cellProofs[cellProofIdx] = cellsAndProofs.Proofs[colIdx][:] + cellProofs[cellProofIdx] = proofs[colIdx][:] } } diff --git a/beacon-chain/rpc/eth/blob/handlers.go b/beacon-chain/rpc/eth/blob/handlers.go index b11fca3b8dd8..77fb38d7f7e4 100644 --- a/beacon-chain/rpc/eth/blob/handlers.go +++ b/beacon-chain/rpc/eth/blob/handlers.go @@ -134,9 +134,6 @@ func (s *Server) GetBlobs(w http.ResponseWriter, r *http.Request) { segments := strings.Split(r.URL.Path, "/") blockId := segments[len(segments)-1] - var verifiedBlobs []*blocks.VerifiedROBlob - var rpcErr *core.RpcError - // Check if versioned_hashes parameter is provided versionedHashesStr := r.URL.Query()["versioned_hashes"] versionedHashes := make([][]byte, len(versionedHashesStr)) @@ -149,7 +146,7 @@ func (s *Server) GetBlobs(w http.ResponseWriter, r *http.Request) { versionedHashes[i] = hash } } - verifiedBlobs, rpcErr = s.Blocker.Blobs(ctx, blockId, options.WithVersionedHashes(versionedHashes)) + blobsData, rpcErr := s.Blocker.BlobsData(ctx, blockId, options.WithVersionedHashes(versionedHashes)) if rpcErr != nil { code := core.ErrorReasonToHTTP(rpcErr.Reason) switch code { @@ -175,9 +172,9 @@ func (s *Server) GetBlobs(w http.ResponseWriter, r *http.Request) { if httputil.RespondWithSsz(r) { sszLen := fieldparams.BlobSize - sszData := make([]byte, len(verifiedBlobs)*sszLen) - for i := range verifiedBlobs { - copy(sszData[i*sszLen:(i+1)*sszLen], verifiedBlobs[i].Blob) + sszData := make([]byte, len(blobsData)*sszLen) + for i := range blobsData { + copy(sszData[i*sszLen:(i+1)*sszLen], blobsData[i]) } w.Header().Set(api.VersionHeader, version.String(blk.Version())) @@ -196,9 +193,9 @@ func (s *Server) GetBlobs(w http.ResponseWriter, r *http.Request) { return } - data := make([]string, len(verifiedBlobs)) - for i, v := range verifiedBlobs { - data[i] = hexutil.Encode(v.Blob) + data := make([]string, len(blobsData)) + for i, blob := range blobsData { + data[i] = hexutil.Encode(blob) } resp := &structs.GetBlobsResponse{ Data: data, diff --git a/beacon-chain/rpc/lookup/blocker.go b/beacon-chain/rpc/lookup/blocker.go index f618808dce6e..268f53d3a7e7 100644 --- a/beacon-chain/rpc/lookup/blocker.go +++ b/beacon-chain/rpc/lookup/blocker.go @@ -61,6 +61,7 @@ func (e BlockIdParseError) Error() string { type Blocker interface { Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error) Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError) + BlobsData(ctx context.Context, id string, opts ...options.BlobsOption) ([][]byte, *core.RpcError) DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) } @@ -224,23 +225,18 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id 
[]byte) (interfaces.Read return blk, nil } -// Blobs returns the fetched blobs for a given block ID with configurable options. -// Options can specify either blob indices or versioned hashes for retrieval. -// The identifier can be one of: -// - "head" (canonical head in node's view) -// - "genesis" -// - "finalized" -// - "justified" -// - -// - -// - -// -// cases: -// - no block, 404 -// - block exists, has commitments, inside retention period (greater of protocol- or user-specified) serve then w/ 200 unless we hit an error reading them. -// we are technically not supposed to import a block to forkchoice unless we have the blobs, so the nuance here is if we can't find the file and we are inside the protocol-defined retention period, then it's actually a 500. -// - block exists, has commitments, outside retention period (greater of protocol- or user-specified) - ie just like block exists, no commitment -func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError) { +// blobsContext holds common information needed for blob retrieval +type blobsContext struct { + root [fieldparams.RootLength]byte + roBlock blocks.ROBlock + commitments [][]byte + indices []int + fuluForkSlot primitives.Slot +} + +// resolveBlobsContext extracts common blob retrieval logic including block resolution, +// validation, and index conversion from versioned hashes. +func (p *BeaconDbBlocker) resolveBlobsContext(ctx context.Context, id string, opts ...options.BlobsOption) (*blobsContext, *core.RpcError) { // Apply options cfg := &options.BlobsConfig{} for _, opt := range opts { @@ -279,11 +275,6 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, opts ...options. return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve kzg commitments from block %#x", root), Reason: core.Internal} } - // If there are no commitments return 200 w/ empty list - if len(commitments) == 0 { - return make([]*blocks.VerifiedROBlob, 0), nil - } - // Compute the first Fulu slot. fuluForkSlot := primitives.Slot(math.MaxUint64) if fuluForkEpoch := params.BeaconConfig().FuluForkEpoch; fuluForkEpoch != primitives.Epoch(math.MaxUint64) { @@ -333,16 +324,156 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, opts ...options. } } + // Create ROBlock with root for post-Fulu blocks + var roBlockWithRoot blocks.ROBlock if roBlock.Slot() >= fuluForkSlot { - roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, root) + roBlockWithRoot, err = blocks.NewROBlockWithRoot(roSignedBlock, root) if err != nil { return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to create roBlock with root %#x", root), Reason: core.Internal} } + } + + return &blobsContext{ + root: root, + roBlock: roBlockWithRoot, + commitments: commitments, + indices: indices, + fuluForkSlot: fuluForkSlot, + }, nil +} + +// Blobs returns the fetched blob sidecars (with full KZG proofs) for a given block ID. +// Options can specify either blob indices or versioned hashes for retrieval. +// The identifier can be one of: +// - "head" (canonical head in node's view) +// - "genesis" +// - "finalized" +// - "justified" +// - +// - +// - +func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError) { + bctx, rpcErr := p.resolveBlobsContext(ctx, id, opts...) 
+ if rpcErr != nil { + return nil, rpcErr + } + + // If there are no commitments return 200 w/ empty list + if len(bctx.commitments) == 0 { + return make([]*blocks.VerifiedROBlob, 0), nil + } + + // Check if this is a post-Fulu block (uses data columns) + if bctx.roBlock.Root() != [32]byte{} { + return p.blobSidecarsFromStoredDataColumns(bctx.roBlock, bctx.indices) + } + + // Pre-Fulu block (uses blob sidecars) + return p.blobsFromStoredBlobs(bctx.commitments, bctx.root, bctx.indices) +} + +// BlobsData returns just the blob data without computing KZG proofs or creating full sidecars. +// This is an optimized endpoint for when only blob data is needed (e.g., GetBlobs endpoint). +// The identifier can be one of: +// - "head" (canonical head in node's view) +// - "genesis" +// - "finalized" +// - "justified" +// - +// - +// - +func (p *BeaconDbBlocker) BlobsData(ctx context.Context, id string, opts ...options.BlobsOption) ([][]byte, *core.RpcError) { + bctx, rpcErr := p.resolveBlobsContext(ctx, id, opts...) + if rpcErr != nil { + return nil, rpcErr + } + + // If there are no commitments return 200 w/ empty list + if len(bctx.commitments) == 0 { + return make([][]byte, 0), nil + } + + // Check if this is a post-Fulu block (uses data columns) + if bctx.roBlock.Root() != [32]byte{} { + return p.blobsDataFromStoredDataColumns(bctx.root, bctx.indices, len(bctx.commitments)) + } + + // Pre-Fulu block (uses blob sidecars) + return p.blobsDataFromStoredBlobs(bctx.root, bctx.indices) +} + +// blobsDataFromStoredBlobs retrieves just blob data (without proofs) from stored blob sidecars. +func (p *BeaconDbBlocker) blobsDataFromStoredBlobs(root [fieldparams.RootLength]byte, indices []int) ([][]byte, *core.RpcError) { + summary := p.BlobStorage.Summary(root) + + // If no indices are provided, use all indices that are available in the summary. + if len(indices) == 0 { + maxBlobCount := summary.MaxBlobsForEpoch() + for index := 0; uint64(index) < maxBlobCount; index++ { // needed for safe conversion + if summary.HasIndex(uint64(index)) { + indices = append(indices, index) + } + } + } + + // Retrieve blob sidecars from the store and extract just the blob data. + blobsData := make([][]byte, 0, len(indices)) + for _, index := range indices { + if !summary.HasIndex(uint64(index)) { + return nil, &core.RpcError{ + Err: fmt.Errorf("requested index %d not found", index), + Reason: core.NotFound, + } + } + + blobSidecar, err := p.BlobStorage.Get(root, uint64(index)) + if err != nil { + return nil, &core.RpcError{ + Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", root, index), + Reason: core.Internal, + } + } + + blobsData = append(blobsData, blobSidecar.Blob) + } + + return blobsData, nil +} + +// blobsDataFromStoredDataColumns retrieves blob data from stored data columns without computing KZG proofs. +func (p *BeaconDbBlocker) blobsDataFromStoredDataColumns(root [fieldparams.RootLength]byte, indices []int, blobCount int) ([][]byte, *core.RpcError) { + // Count how many columns we have in the store. + summary := p.DataColumnStorage.Summary(root) + stored := summary.Stored() + count := uint64(len(stored)) + + if count < peerdas.MinimumColumnCountToReconstruct() { + // There is no way to reconstruct the data columns. 
+ return nil, &core.RpcError{ + Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call succeeds, or retry later if the flag is already set", flags.SubscribeAllDataSubnets.Name), + Reason: core.NotFound, + } + } + + // Retrieve the needed data column sidecars from the database. + verifiedRoDataColumnSidecars, err := p.neededDataColumnSidecars(root, stored) + if err != nil { + return nil, &core.RpcError{ + Err: errors.Wrap(err, "needed data column sidecars"), + Reason: core.Internal, + } + } - return p.blobsFromStoredDataColumns(roBlock, indices) + // Use optimized path to get just blob data without computing proofs. + blobsData, err := peerdas.ReconstructBlobsData(verifiedRoDataColumnSidecars, indices, blobCount) + if err != nil { + return nil, &core.RpcError{ + Err: errors.Wrap(err, "reconstruct blobs data"), + Reason: core.Internal, + } } - return p.blobsFromStoredBlobs(commitments, root, indices) + return blobsData, nil } // blobsFromStoredBlobs retrieves blob sidercars corresponding to `indices` and `root` from the store. @@ -393,13 +524,12 @@ func (p *BeaconDbBlocker) blobsFromStoredBlobs(commitments [][]byte, root [field return blobs, nil } -// blobsFromStoredDataColumns retrieves data column sidecars from the store, -// reconstructs the whole matrix if needed, converts the matrix to blobs, -// and then returns converted blobs corresponding to `indices` and `root`. +// blobSidecarsFromStoredDataColumns retrieves data column sidecars from the store, +// reconstructs the whole matrix if needed, and converts the matrix to blob sidecars with full KZG proofs. // This function expects data column sidecars to be stored (aka. no blob sidecars). // If not enough data column sidecars are available to convert blobs from them // (either directly or after reconstruction), an error is returned. -func (p *BeaconDbBlocker) blobsFromStoredDataColumns(block blocks.ROBlock, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError) { +func (p *BeaconDbBlocker) blobSidecarsFromStoredDataColumns(block blocks.ROBlock, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError) { root := block.Root() // Use all indices if none are provided. @@ -439,7 +569,7 @@ func (p *BeaconDbBlocker) blobsFromStoredDataColumns(block blocks.ROBlock, indic } } - // Reconstruct blob sidecars from data column sidecars. + // Reconstruct blob sidecars with full KZG proofs.
verifiedRoBlobSidecars, err := peerdas.ReconstructBlobs(block, verifiedRoDataColumnSidecars, indices) if err != nil { return nil, &core.RpcError{ diff --git a/beacon-chain/rpc/lookup/blocker_test.go b/beacon-chain/rpc/lookup/blocker_test.go index bf8f967c6485..70a1f9127071 100644 --- a/beacon-chain/rpc/lookup/blocker_test.go +++ b/beacon-chain/rpc/lookup/blocker_test.go @@ -306,16 +306,18 @@ func TestGetBlob(t *testing.T) { fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fs, blobCount) fuluBlockRoot := fuluBlock.Root() - cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobSidecars)) + cellsPerBlobList := make([][]kzg.Cell, 0, len(fuluBlobSidecars)) + proofsPerBlobList := make([][]kzg.Proof, 0, len(fuluBlobSidecars)) for _, blob := range fuluBlobSidecars { var kzgBlob kzg.Blob copy(kzgBlob[:], blob.Blob) - cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob) + cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob) require.NoError(t, err) - cellsAndProofsList = append(cellsAndProofsList, cellsAndProofs) + cellsPerBlobList = append(cellsPerBlobList, cells) + proofsPerBlobList = append(proofsPerBlobList, proofs) } - roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofsList, peerdas.PopulateFromBlock(fuluBlock)) + roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlobList, proofsPerBlobList, peerdas.PopulateFromBlock(fuluBlock)) require.NoError(t, err) verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars)) @@ -665,16 +667,18 @@ func TestBlobs_CommitmentOrdering(t *testing.T) { require.Equal(t, 3, len(commitments)) // Convert blob sidecars to data column sidecars for Fulu - cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobs)) + cellsPerBlobList := make([][]kzg.Cell, 0, len(fuluBlobs)) + proofsPerBlobList := make([][]kzg.Proof, 0, len(fuluBlobs)) for _, blob := range fuluBlobs { var kzgBlob kzg.Blob copy(kzgBlob[:], blob.Blob) - cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob) + cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob) require.NoError(t, err) - cellsAndProofsList = append(cellsAndProofsList, cellsAndProofs) + cellsPerBlobList = append(cellsPerBlobList, cells) + proofsPerBlobList = append(proofsPerBlobList, proofs) } - dataColumnSidecarPb, err := peerdas.DataColumnSidecars(cellsAndProofsList, peerdas.PopulateFromBlock(fuluBlock)) + dataColumnSidecarPb, err := peerdas.DataColumnSidecars(cellsPerBlobList, proofsPerBlobList, peerdas.PopulateFromBlock(fuluBlock)) require.NoError(t, err) verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecarPb)) @@ -829,16 +833,18 @@ func TestGetDataColumns(t *testing.T) { fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fuluForkSlot, blobCount) fuluBlockRoot := fuluBlock.Root() - cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobSidecars)) + cellsPerBlobList := make([][]kzg.Cell, 0, len(fuluBlobSidecars)) + proofsPerBlobList := make([][]kzg.Proof, 0, len(fuluBlobSidecars)) for _, blob := range fuluBlobSidecars { var kzgBlob kzg.Blob copy(kzgBlob[:], blob.Blob) - cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob) + cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob) require.NoError(t, err) - cellsAndProofsList = append(cellsAndProofsList, cellsAndProofs) + cellsPerBlobList = append(cellsPerBlobList, cells) + proofsPerBlobList = 
append(proofsPerBlobList, proofs) } - roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofsList, peerdas.PopulateFromBlock(fuluBlock)) + roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlobList, proofsPerBlobList, peerdas.PopulateFromBlock(fuluBlock)) require.NoError(t, err) verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars)) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go index 091a745737d0..390bd525cb57 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go @@ -413,13 +413,13 @@ func (vs *Server) handleUnblindedBlock( if block.Version() >= version.Fulu { // Compute cells and proofs from the blobs and cell proofs. - cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(rawBlobs, proofs) + cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(rawBlobs, proofs) if err != nil { return nil, nil, errors.Wrap(err, "compute cells and proofs") } // Construct data column sidecars from the signed block and cells and proofs. - roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(block)) + roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(block)) if err != nil { return nil, nil, errors.Wrap(err, "data column sidcars") } diff --git a/beacon-chain/rpc/testutil/mock_blocker.go b/beacon-chain/rpc/testutil/mock_blocker.go index 284aa6158c1b..c837a4eefeb7 100644 --- a/beacon-chain/rpc/testutil/mock_blocker.go +++ b/beacon-chain/rpc/testutil/mock_blocker.go @@ -44,6 +44,11 @@ func (*MockBlocker) Blobs(_ context.Context, _ string, _ ...options.BlobsOption) return nil, &core.RpcError{} } +// BlobsData -- +func (*MockBlocker) BlobsData(_ context.Context, _ string, _ ...options.BlobsOption) ([][]byte, *core.RpcError) { + return nil, &core.RpcError{} +} + // DataColumns -- func (m *MockBlocker) DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) { if m.DataColumnsFunc != nil { diff --git a/beacon-chain/verification/data_column_test.go b/beacon-chain/verification/data_column_test.go index e4a92d41bf99..fd409b16b93a 100644 --- a/beacon-chain/verification/data_column_test.go +++ b/beacon-chain/verification/data_column_test.go @@ -29,8 +29,8 @@ func GenerateTestDataColumns(t *testing.T, parent [fieldparams.RootLength]byte, blobs = append(blobs, kzg.Blob(roBlobs[i].Blob)) } - cellsAndProofs := util.GenerateCellsAndProofs(t, blobs) - roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(roBlock)) + cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs) + roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock)) require.NoError(t, err) return roDataColumnSidecars diff --git a/changelog/james-prysm_optimize-get-blobs.md b/changelog/james-prysm_optimize-get-blobs.md new file mode 100644 index 000000000000..6071fb29979c --- /dev/null +++ b/changelog/james-prysm_optimize-get-blobs.md @@ -0,0 +1,3 @@ +### Ignored + +- Optimized the blob REST API (`GetBlobs`) to skip cell and KZG proof computation when only blob data is needed.
\ No newline at end of file diff --git a/testing/spectest/general/fulu__kzg__compute_cells_and_kzg_proofs_test.go b/testing/spectest/general/fulu__kzg__compute_cells_and_kzg_proofs_test.go index 1dc122777db5..a7f845512106 100644 --- a/testing/spectest/general/fulu__kzg__compute_cells_and_kzg_proofs_test.go +++ b/testing/spectest/general/fulu__kzg__compute_cells_and_kzg_proofs_test.go @@ -42,18 +42,16 @@ func TestComputeCellsAndKzgProofs(t *testing.T) { } b := kzgPrysm.Blob(blob) - cellsAndProofsForBlob, err := kzgPrysm.ComputeCellsAndKZGProofs(&b) + cells, proofs, err := kzgPrysm.ComputeCellsAndKZGProofs(&b) if test.Output != nil { require.NoError(t, err) var combined [][]string - cs := cellsAndProofsForBlob.Cells - csRaw := make([]string, 0, len(cs)) - for _, c := range cs { + csRaw := make([]string, 0, len(cells)) + for _, c := range cells { csRaw = append(csRaw, hexutil.Encode(c[:])) } - ps := cellsAndProofsForBlob.Proofs - psRaw := make([]string, 0, len(ps)) - for _, p := range ps { + psRaw := make([]string, 0, len(proofs)) + for _, p := range proofs { psRaw = append(psRaw, hexutil.Encode(p[:])) } combined = append(combined, csRaw) diff --git a/testing/spectest/general/fulu__kzg__recover_cells_and_kzg_proofs_test.go b/testing/spectest/general/fulu__kzg__recover_cells_and_kzg_proofs_test.go index d6731ad2b36a..adb7e208d4ee 100644 --- a/testing/spectest/general/fulu__kzg__recover_cells_and_kzg_proofs_test.go +++ b/testing/spectest/general/fulu__kzg__recover_cells_and_kzg_proofs_test.go @@ -69,18 +69,16 @@ func TestRecoverCellsAndKzgProofs(t *testing.T) { } // Recover the cells and proofs for the corresponding blob - cellsAndProofsForBlob, err := kzgPrysm.RecoverCellsAndKZGProofs(cellIndices, cells) + recoveredCells, recoveredProofs, err := kzgPrysm.RecoverCellsAndKZGProofs(cellIndices, cells) if test.Output != nil { require.NoError(t, err) var combined [][]string - cs := cellsAndProofsForBlob.Cells - csRaw := make([]string, 0, len(cs)) - for _, c := range cs { + csRaw := make([]string, 0, len(recoveredCells)) + for _, c := range recoveredCells { csRaw = append(csRaw, hexutil.Encode(c[:])) } - ps := cellsAndProofsForBlob.Proofs - psRaw := make([]string, 0, len(ps)) - for _, p := range ps { + psRaw := make([]string, 0, len(recoveredProofs)) + for _, p := range recoveredProofs { psRaw = append(psRaw, hexutil.Encode(p[:])) } combined = append(combined, csRaw) diff --git a/testing/util/fulu.go b/testing/util/fulu.go index 66613adf9cd8..7566294b7af0 100644 --- a/testing/util/fulu.go +++ b/testing/util/fulu.go @@ -146,11 +146,11 @@ func GenerateTestFuluBlockWithSidecars(t *testing.T, blobCount int, options ...F signedBeaconBlock, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - cellsAndProofs := GenerateCellsAndProofs(t, blobs) + cellsPerBlob, proofsPerBlob := GenerateCellsAndProofs(t, blobs) rob, err := blocks.NewROBlockWithRoot(signedBeaconBlock, root) require.NoError(t, err) - roSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob)) + roSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob)) require.NoError(t, err) verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars)) @@ -167,12 +167,14 @@ func GenerateTestFuluBlockWithSidecars(t *testing.T, blobCount int, options ...F return roBlock, roSidecars, verifiedRoSidecars } -func GenerateCellsAndProofs(t testing.TB, blobs []kzg.Blob) []kzg.CellsAndProofs { - cellsAndProofs := make([]kzg.CellsAndProofs, len(blobs)) +func 
GenerateCellsAndProofs(t testing.TB, blobs []kzg.Blob) ([][]kzg.Cell, [][]kzg.Proof) { + cellsPerBlob := make([][]kzg.Cell, len(blobs)) + proofsPerBlob := make([][]kzg.Proof, len(blobs)) for i := range blobs { - cp, err := kzg.ComputeCellsAndKZGProofs(&blobs[i]) + cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&blobs[i]) require.NoError(t, err) - cellsAndProofs[i] = cp + cellsPerBlob[i] = cells + proofsPerBlob[i] = proofs } - return cellsAndProofs + return cellsPerBlob, proofsPerBlob }
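
Reviewer note: a minimal, self-contained sketch of the flattened cell-proof layout that the handlers and tests in this diff rely on, where the proof for blob b, column c sits at index b*numberOfColumns + c. This is not part of the PR; the local type, variable names, and the column count of 128 below are illustrative assumptions (the real value comes from params.BeaconConfig().NumberOfColumns).

package main

import "fmt"

func main() {
	// Assumed values for illustration only.
	const numberOfColumns = 128
	const blobCount = 2

	type proof [48]byte

	// Stand-in for per-blob proofs as returned by a ComputeCellsAndKZGProofs-style call.
	proofsPerBlob := make([][]proof, blobCount)
	for b := range proofsPerBlob {
		proofsPerBlob[b] = make([]proof, numberOfColumns)
	}

	// Flatten into the execution-client format used by the REST handlers:
	// index = blobIdx*numberOfColumns + colIdx.
	cellProofs := make([][]byte, blobCount*numberOfColumns)
	for blobIdx, proofs := range proofsPerBlob {
		for colIdx := 0; colIdx < numberOfColumns; colIdx++ {
			cellProofs[blobIdx*numberOfColumns+colIdx] = proofs[colIdx][:]
		}
	}

	fmt.Println(len(cellProofs)) // 256
}

The same index arithmetic appears in Test_validateBlobs above when it builds cellProofs from the per-blob proof slices.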