This repository was archived by the owner on Jan 22, 2025. It is now read-only.

Commit a7b8290

removes SIZE_OF_DATA_SHRED_PAYLOAD

1 parent fd1d095

7 files changed: +41 -48 lines

core/benches/shredder.rs

Lines changed: 10 additions & 8 deletions

@@ -9,13 +9,16 @@ use {
     solana_entry::entry::{create_ticks, Entry},
     solana_ledger::shred::{
         max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, Shred, ShredFlags,
-        Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK, SIZE_OF_DATA_SHRED_PAYLOAD,
+        Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK,
     },
     solana_perf::test_tx,
     solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, signature::Keypair},
     test::Bencher,
 };

+// Equivalent to ledger::shred::legacy::ShredData::CAPACITY.
+const LEGACY_SHRED_DATA_CAPACITY: usize = 1051;
+
 // Copied these values here to avoid exposing shreds
 // internals only for the sake of benchmarks.

@@ -38,12 +41,11 @@ fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec<Ent
 }

 fn make_shreds(num_shreds: usize) -> Vec<Shred> {
-    let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
     let txs_per_entry = 128;
     let num_entries = max_entries_per_n_shred(
         &make_test_entry(txs_per_entry),
         2 * num_shreds as u64,
-        Some(shred_size),
+        Some(LEGACY_SHRED_DATA_CAPACITY),
     );
     let entries = make_large_unchained_entries(txs_per_entry, num_entries);
     let shredder = Shredder::new(1, 0, 0, 0).unwrap();

@@ -73,10 +75,10 @@ fn make_concatenated_shreds(num_shreds: usize) -> Vec<u8> {
 #[bench]
 fn bench_shredder_ticks(bencher: &mut Bencher) {
     let kp = Keypair::new();
-    let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
+    let shred_size = LEGACY_SHRED_DATA_CAPACITY;
     let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
     // ~1Mb
-    let num_ticks = max_ticks_per_n_shreds(1, Some(SIZE_OF_DATA_SHRED_PAYLOAD)) * num_shreds as u64;
+    let num_ticks = max_ticks_per_n_shreds(1, Some(LEGACY_SHRED_DATA_CAPACITY)) * num_shreds as u64;
     let entries = create_ticks(num_ticks, 0, Hash::default());
     bencher.iter(|| {
         let shredder = Shredder::new(1, 0, 0, 0).unwrap();

@@ -87,7 +89,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
 #[bench]
 fn bench_shredder_large_entries(bencher: &mut Bencher) {
     let kp = Keypair::new();
-    let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
+    let shred_size = LEGACY_SHRED_DATA_CAPACITY;
     let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
     let txs_per_entry = 128;
     let num_entries = max_entries_per_n_shred(

@@ -106,7 +108,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
 #[bench]
 fn bench_deshredder(bencher: &mut Bencher) {
     let kp = Keypair::new();
-    let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
+    let shred_size = LEGACY_SHRED_DATA_CAPACITY;
     // ~10Mb
     let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
     let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;

@@ -121,7 +123,7 @@ fn bench_deshredder(bencher: &mut Bencher) {

 #[bench]
 fn bench_deserialize_hdr(bencher: &mut Bencher) {
-    let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];
+    let data = vec![0; LEGACY_SHRED_DATA_CAPACITY];

     let shred = Shred::new_from_data(2, 1, 1, &data, ShredFlags::LAST_SHRED_IN_SLOT, 0, 0, 1);
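The benches above size their workloads with the ceiling-division idiom (len + shred_size - 1) / shred_size. A minimal worked check of that arithmetic, not part of the commit, with the capacity value copied from the diff:

// Not part of the commit: worked check of the ceiling-division idiom the
// benches use to decide how many shreds are needed for a given byte count.
fn num_shreds(len: usize, capacity: usize) -> usize {
    (len + capacity - 1) / capacity
}

fn main() {
    const LEGACY_SHRED_DATA_CAPACITY: usize = 1051;
    // ~1Mb of ticks, as in bench_shredder_ticks: 1_000_000 / 1051 rounds up.
    assert_eq!(num_shreds(1000 * 1000, LEGACY_SHRED_DATA_CAPACITY), 952);
    // An exact multiple needs no extra shred.
    assert_eq!(num_shreds(2 * 1051, LEGACY_SHRED_DATA_CAPACITY), 2);
}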

core/src/replay_stage.rs

Lines changed: 2 additions & 2 deletions

@@ -3180,7 +3180,7 @@ pub(crate) mod tests {
         create_new_tmp_ledger,
         genesis_utils::{create_genesis_config, create_genesis_config_with_leader},
         get_tmp_ledger_path,
-        shred::{Shred, ShredFlags, SIZE_OF_DATA_SHRED_PAYLOAD},
+        shred::{Shred, ShredFlags},
     },
     solana_rpc::{
         optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank,

@@ -3779,7 +3779,7 @@ pub(crate) mod tests {
     fn test_dead_fork_entry_deserialize_failure() {
         // Insert entry that causes deserialization failure
         let res = check_dead_fork(|_, bank| {
-            let gibberish = [0xa5u8; SIZE_OF_DATA_SHRED_PAYLOAD];
+            let gibberish = [0xa5u8; /*legacy data-shred capacity:*/1051];
             let parent_offset = bank.slot() - bank.parent_slot();
             let shred = Shred::new_from_data(
                 bank.slot(),
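The test above fills a data shred with 0xa5 bytes precisely so that entry deserialization fails and the fork is marked dead. A hedged sketch of why such a payload cannot decode, assuming the bincode 1.x defaults this codebase uses; the u64 element type is a stand-in for the real Entry:

// Sketch only: an all-0xa5 buffer begins with 0xa5a5a5a5a5a5a5a5 as the
// Vec length prefix, far more elements than 1051 bytes can hold, so
// decoding runs out of input and errors instead of yielding entries.
fn main() {
    let gibberish = [0xa5u8; 1051];
    let res: Result<Vec<u64>, _> = bincode::deserialize(&gibberish);
    assert!(res.is_err());
}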

ledger/benches/sigverify_shreds.rs

Lines changed: 3 additions & 3 deletions

@@ -3,7 +3,7 @@
 extern crate test;
 use {
     solana_ledger::{
-        shred::{Shred, ShredFlags, SIZE_OF_DATA_SHRED_PAYLOAD},
+        shred::{Shred, ShredFlags},
         sigverify_shreds::{sign_shreds_cpu, sign_shreds_gpu, sign_shreds_gpu_pinned_keypair},
     },
     solana_perf::{

@@ -32,7 +32,7 @@ fn bench_sigverify_shreds_sign_gpu(bencher: &mut Bencher) {
         slot,
         0xc0de,
         0xdead,
-        &[5; SIZE_OF_DATA_SHRED_PAYLOAD],
+        &[5; /*legacy data-shred capacity:*/1051],
         ShredFlags::LAST_SHRED_IN_SLOT,
         1,
         2,

@@ -63,7 +63,7 @@ fn bench_sigverify_shreds_sign_cpu(bencher: &mut Bencher) {
         slot,
         0xc0de,
         0xdead,
-        &[5; SIZE_OF_DATA_SHRED_PAYLOAD],
+        &[5; /*legacy data-shred capacity:*/1051],
         ShredFlags::LAST_SHRED_IN_SLOT,
         1,
         2,

ledger/src/shred.rs

Lines changed: 3 additions & 12 deletions

@@ -65,7 +65,6 @@ use {
     solana_sdk::{
         clock::Slot,
         hash::{hashv, Hash},
-        packet::PACKET_DATA_SIZE,
         pubkey::Pubkey,
         signature::{Keypair, Signature, Signer},
     },

@@ -98,15 +97,6 @@ pub const SIZE_OF_NONCE: usize = 4;
 const_assert_eq!(SIZE_OF_CODING_SHRED_HEADERS, 89);
 const SIZE_OF_CODING_SHRED_HEADERS: usize =
     SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_CODING_SHRED_HEADER;
-// XXX This is only true for the legacy shreds;
-// Maximum size of data that a data-shred may contain (excluding headers).
-const_assert_eq!(SIZE_OF_DATA_SHRED_PAYLOAD, 1051);
-// XXX rename DataShred::capacity
-pub const SIZE_OF_DATA_SHRED_PAYLOAD: usize = PACKET_DATA_SIZE
-    - SIZE_OF_COMMON_SHRED_HEADER
-    - SIZE_OF_DATA_SHRED_HEADER
-    - SIZE_OF_CODING_SHRED_HEADERS
-    - SIZE_OF_NONCE;
 const_assert_eq!(SHRED_DATA_OFFSET, 88);
 const SHRED_DATA_OFFSET: usize = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER;
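The deleted constant was the one place the 1051 figure was derived, so for reference, here is a worked check of the removed formula, not code from the commit. It uses PACKET_DATA_SIZE = 1232 and a common-header size of 83 from the crate (both assumptions here), which agree with the asserts visible above (SHRED_DATA_OFFSET = 88 = 83 + 5, SIZE_OF_CODING_SHRED_HEADERS = 89, SIZE_OF_NONCE = 4):

// Worked check of the removed formula: the legacy data-shred capacity is
// the packet size minus every header and the retransmit nonce.
fn main() {
    const PACKET_DATA_SIZE: usize = 1232; // solana_sdk::packet::PACKET_DATA_SIZE
    const SIZE_OF_COMMON_SHRED_HEADER: usize = 83;
    const SIZE_OF_DATA_SHRED_HEADER: usize = 5; // SHRED_DATA_OFFSET (88) - 83
    const SIZE_OF_CODING_SHRED_HEADERS: usize = 89;
    const SIZE_OF_NONCE: usize = 4;
    let capacity = PACKET_DATA_SIZE
        - SIZE_OF_COMMON_SHRED_HEADER
        - SIZE_OF_DATA_SHRED_HEADER
        - SIZE_OF_CODING_SHRED_HEADERS
        - SIZE_OF_NONCE;
    assert_eq!(capacity, 1051); // 1232 - 83 - 5 - 89 - 4
}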

@@ -662,7 +652,8 @@ pub fn max_entries_per_n_shred(
     num_shreds: u64,
     shred_data_size: Option<usize>,
 ) -> u64 {
-    let shred_data_size = shred_data_size.unwrap_or(SIZE_OF_DATA_SHRED_PAYLOAD) as u64;
+    let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap();
+    let shred_data_size = shred_data_size.unwrap_or(data_buffer_size) as u64;
     let vec_size = bincode::serialized_size(&vec![entry]).unwrap();
     let entry_size = bincode::serialized_size(entry).unwrap();
     let count_size = vec_size - entry_size;

@@ -936,7 +927,7 @@ mod tests {
         let seed = <[u8; 32]>::try_from(bs58_decode(SEED)).unwrap();
         ChaChaRng::from_seed(seed)
     };
-    let mut data = [0u8; SIZE_OF_DATA_SHRED_PAYLOAD];
+    let mut data = [0u8; legacy::ShredData::CAPACITY];
     rng.fill(&mut data[..]);
     let keypair = Keypair::generate(&mut rng);
     let mut shred = Shred::new_from_data(
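max_entries_per_n_shred, shown above, learns the Vec length-prefix overhead by subtracting the serialized size of a lone entry from that of a one-element vector. A small sketch of that bookkeeping with a stand-in element type, assuming bincode 1.x defaults where the prefix is a fixed 8 bytes:

// Sketch of the count_size computation in max_entries_per_n_shred:
// vec![entry] serializes as an 8-byte length prefix plus the entry itself.
fn main() {
    let entry: u64 = 42; // stand-in for a ledger Entry
    let vec_size = bincode::serialized_size(&vec![entry]).unwrap();
    let entry_size = bincode::serialized_size(&entry).unwrap();
    let count_size = vec_size - entry_size;
    assert_eq!(count_size, 8);
}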

ledger/src/shredder.rs

Lines changed: 18 additions & 18 deletions

@@ -1,7 +1,6 @@
 use {
     crate::shred::{
-        Error, ProcessShredsStats, Shred, ShredFlags, MAX_DATA_SHREDS_PER_FEC_BLOCK,
-        SIZE_OF_DATA_SHRED_PAYLOAD,
+        Error, ProcessShredsStats, Shred, ShredData, ShredFlags, MAX_DATA_SHREDS_PER_FEC_BLOCK,
     },
     lazy_static::lazy_static,
     rayon::{prelude::*, ThreadPool},

@@ -110,9 +109,9 @@ impl Shredder {
         serialize_time.stop();

         let mut gen_data_time = Measure::start("shred_gen_data_time");
-        let payload_capacity = SIZE_OF_DATA_SHRED_PAYLOAD;
+        let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap();
         // Integer division to ensure we have enough shreds to fit all the data
-        let num_shreds = (serialized_shreds.len() + payload_capacity - 1) / payload_capacity;
+        let num_shreds = (serialized_shreds.len() + data_buffer_size - 1) / data_buffer_size;
         let last_shred_index = next_shred_index + num_shreds as u32 - 1;
         // 1) Generate data shreds
         let make_data_shred = |shred_index: u32, data| {

@@ -141,7 +140,7 @@
         };
         let data_shreds: Vec<Shred> = PAR_THREAD_POOL.install(|| {
             serialized_shreds
-                .par_chunks(payload_capacity)
+                .par_chunks(data_buffer_size)
                 .enumerate()
                 .map(|(i, shred_data)| {
                     let shred_index = next_shred_index + i as u32;

@@ -341,7 +340,8 @@
             // For backward compatibility. This is needed when the data shred
             // payload is None, so that deserializing to Vec<Entry> results in
             // an empty vector.
-            Ok(vec![0u8; SIZE_OF_DATA_SHRED_PAYLOAD])
+            let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap();
+            Ok(vec![0u8; data_buffer_size])
         } else {
             Ok(data)
         }
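The backward-compatibility branch above returns a zero-filled buffer of the data capacity precisely so that downstream deserialization yields an empty entry vector. A sketch of that property, again with a stand-in element type, assuming bincode 1.x defaults which tolerate trailing bytes:

// Sketch only: a zeroed buffer starts with a zero Vec length prefix, so
// bincode decodes it as an empty vector and ignores the trailing zeros.
fn main() {
    let zeroed = vec![0u8; 1051]; // legacy data-shred capacity
    let entries: Vec<u64> = bincode::deserialize(&zeroed).unwrap();
    assert!(entries.is_empty());
}
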
@@ -401,13 +401,13 @@ mod tests {
             })
             .collect();

-        let size = serialized_size(&entries).unwrap();
+        let size = serialized_size(&entries).unwrap() as usize;
         // Integer division to ensure we have enough shreds to fit all the data
-        let payload_capacity = SIZE_OF_DATA_SHRED_PAYLOAD as u64;
-        let num_expected_data_shreds = (size + payload_capacity - 1) / payload_capacity;
+        let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap();
+        let num_expected_data_shreds = (size + data_buffer_size - 1) / data_buffer_size;
         let num_expected_coding_shreds = (2 * MAX_DATA_SHREDS_PER_FEC_BLOCK as usize)
-            .saturating_sub(num_expected_data_shreds as usize)
-            .max(num_expected_data_shreds as usize);
+            .saturating_sub(num_expected_data_shreds)
+            .max(num_expected_data_shreds);
         let start_index = 0;
         let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
             &keypair,

@@ -417,14 +417,14 @@
             start_index, // next_code_index
         );
         let next_index = data_shreds.last().unwrap().index() + 1;
-        assert_eq!(next_index as u64, num_expected_data_shreds);
+        assert_eq!(next_index as usize, num_expected_data_shreds);

         let mut data_shred_indexes = HashSet::new();
         let mut coding_shred_indexes = HashSet::new();
         for shred in data_shreds.iter() {
             assert_eq!(shred.shred_type(), ShredType::Data);
             let index = shred.index();
-            let is_last = index as u64 == num_expected_data_shreds - 1;
+            let is_last = index as usize == num_expected_data_shreds - 1;
             verify_test_data_shred(
                 shred,
                 index,

@@ -455,7 +455,7 @@
             assert!(coding_shred_indexes.contains(&i));
         }

-        assert_eq!(data_shred_indexes.len() as u64, num_expected_data_shreds);
+        assert_eq!(data_shred_indexes.len(), num_expected_data_shreds);
         assert_eq!(coding_shred_indexes.len(), num_expected_coding_shreds);

         // Test reassembly

@@ -573,8 +573,8 @@
         let keypair = Arc::new(Keypair::new());
         let shredder = Shredder::new(slot, slot - 5, 0, 0).unwrap();
         // Create enough entries to make > 1 shred
-        let payload_capacity = SIZE_OF_DATA_SHRED_PAYLOAD;
-        let num_entries = max_ticks_per_n_shreds(1, Some(payload_capacity)) + 1;
+        let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap();
+        let num_entries = max_ticks_per_n_shreds(1, Some(data_buffer_size)) + 1;
         let entries: Vec<_> = (0..num_entries)
             .map(|_| {
                 let keypair0 = Keypair::new();

@@ -622,9 +622,9 @@
         let entry = Entry::new(&Hash::default(), 1, vec![tx0]);

         let num_data_shreds: usize = 5;
-        let payload_capacity = SIZE_OF_DATA_SHRED_PAYLOAD;
+        let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap();
         let num_entries =
-            max_entries_per_n_shred(&entry, num_data_shreds as u64, Some(payload_capacity));
+            max_entries_per_n_shred(&entry, num_data_shreds as u64, Some(data_buffer_size));
         let entries: Vec<_> = (0..num_entries)
             .map(|_| {
                 let keypair0 = Keypair::new();
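The test arithmetic in the first hunk above expects max(2 * MAX_DATA_SHREDS_PER_FEC_BLOCK - n, n) coding shreds for n data shreds. A sketch of that formula; the constant's value (32) comes from the crate and is an assumption here, not shown in this diff:

// Sketch of the expected coding-shred count used by the test above.
fn expected_coding_shreds(num_data: usize, max_per_fec_block: usize) -> usize {
    (2 * max_per_fec_block).saturating_sub(num_data).max(num_data)
}

fn main() {
    let max = 32; // MAX_DATA_SHREDS_PER_FEC_BLOCK, assumed value
    assert_eq!(expected_coding_shreds(5, max), 59); // padded up to 2*32 - 5
    assert_eq!(expected_coding_shreds(100, max), 100); // one per data shred
}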

ledger/src/sigverify_shreds.rs

Lines changed: 2 additions & 2 deletions

@@ -459,7 +459,7 @@ pub fn sign_shreds_gpu(
 pub mod tests {
     use {
         super::*,
-        crate::shred::{Shred, ShredFlags, SIZE_OF_DATA_SHRED_PAYLOAD},
+        crate::shred::{Shred, ShredFlags},
         solana_sdk::signature::{Keypair, Signer},
     };

@@ -636,7 +636,7 @@
             slot,
             0xc0de,
             i as u16,
-            &[5; SIZE_OF_DATA_SHRED_PAYLOAD],
+            &[5; /*legacy data-shred capacity:*/1051],
             ShredFlags::LAST_SHRED_IN_SLOT,
             1,
             2,

ledger/tests/shred.rs

Lines changed: 3 additions & 3 deletions

@@ -3,7 +3,7 @@ use {
     solana_entry::entry::Entry,
     solana_ledger::shred::{
         max_entries_per_n_shred, verify_test_data_shred, Shred, Shredder,
-        MAX_DATA_SHREDS_PER_FEC_BLOCK, SIZE_OF_DATA_SHRED_PAYLOAD,
+        MAX_DATA_SHREDS_PER_FEC_BLOCK,
     },
     solana_sdk::{
         clock::Slot,

@@ -34,7 +34,7 @@ fn test_multi_fec_block_coding() {
     let num_entries = max_entries_per_n_shred(
         &entry,
         num_data_shreds as u64,
-        Some(SIZE_OF_DATA_SHRED_PAYLOAD),
+        Some(/*legacy data-shred capacity:*/ 1051),
     );

     let entries: Vec<_> = (0..num_entries)

@@ -200,7 +200,7 @@ fn setup_different_sized_fec_blocks(
     let num_entries = max_entries_per_n_shred(
         &entry,
         num_shreds_per_iter as u64,
-        Some(SIZE_OF_DATA_SHRED_PAYLOAD),
+        Some(/*legacy data-shred capacity:*/ 1051),
     );
     let entries: Vec<_> = (0..num_entries)
         .map(|_| {
