diff --git a/crates/bench/benches/special.rs b/crates/bench/benches/special.rs
index c45066f9447..daf97d99ac1 100644
--- a/crates/bench/benches/special.rs
+++ b/crates/bench/benches/special.rs
@@ -141,7 +141,7 @@ fn serialize_benchmarks<
         Arc::new(table_schema),
         spacetimedb_table::indexes::SquashedOffset::COMMITTED_STATE,
     );
-    let pool = PagePool::default();
+    let pool = PagePool::new_for_test();
     let mut blob_store = spacetimedb_table::blob_store::HashMapBlobStore::default();
     let ptrs = data_pv
diff --git a/crates/core/src/db/datastore/locking_tx_datastore/datastore.rs b/crates/core/src/db/datastore/locking_tx_datastore/datastore.rs
index 7168be47c59..1ae4ae393a5 100644
--- a/crates/core/src/db/datastore/locking_tx_datastore/datastore.rs
+++ b/crates/core/src/db/datastore/locking_tx_datastore/datastore.rs
@@ -1157,7 +1157,7 @@ mod tests {
     }

     fn get_datastore() -> Result {
-        Locking::bootstrap(Identity::ZERO, <_>::default())
+        Locking::bootstrap(Identity::ZERO, PagePool::new_for_test())
     }

     fn col(col: u16) -> ColList {
diff --git a/crates/core/src/db/relational_db.rs b/crates/core/src/db/relational_db.rs
index fed263135c3..81fc33e8cd4 100644
--- a/crates/core/src/db/relational_db.rs
+++ b/crates/core/src/db/relational_db.rs
@@ -1622,7 +1622,7 @@ pub mod tests_utils {
             history,
             durability,
             snapshot_repo,
-            PagePool::default(),
+            PagePool::new_for_test(),
         )?;
         assert_eq!(connected_clients.len(), expected_num_clients);
         let db = db.with_row_count(Self::row_count_fn());
@@ -1828,7 +1828,7 @@ mod tests {
             EmptyHistory::new(),
             None,
             None,
-            PagePool::default(),
+            PagePool::new_for_test(),
         ) {
             Ok(_) => {
                 panic!("Allowed to open database twice")
@@ -2770,7 +2770,7 @@ mod tests {
             Identity::ZERO,
             Some(&repo),
             Some(last_compress),
-            PagePool::default(),
+            PagePool::new_for_test(),
         )?;

         Ok(())
@@ -2795,7 +2795,8 @@ mod tests {
         );

         let last = repo.latest_snapshot()?;
-        let stdb = RelationalDB::restore_from_snapshot_or_bootstrap(identity, Some(&repo), last, PagePool::default())?;
+        let stdb =
+            RelationalDB::restore_from_snapshot_or_bootstrap(identity, Some(&repo), last, PagePool::new_for_test())?;

         let out = TempDir::with_prefix("snapshot_test")?;
         let dir = SnapshotsPath::from_path_unchecked(out.path());
diff --git a/crates/core/src/host/host_controller.rs b/crates/core/src/host/host_controller.rs
index a8a72427998..9396516cb34 100644
--- a/crates/core/src/host/host_controller.rs
+++ b/crates/core/src/host/host_controller.rs
@@ -972,7 +972,7 @@ pub async fn extract_schema(program_bytes: Box<[u8]>, host_type: HostType) -> an
     };

     let runtimes = HostRuntimes::new(None);
-    let page_pool = PagePool::default();
+    let page_pool = PagePool::new(None);
     let module_info = Host::try_init_in_memory_to_check(&runtimes, page_pool, database, program).await?;
     let module_info = Arc::into_inner(module_info).unwrap();
diff --git a/crates/snapshot/tests/remote.rs b/crates/snapshot/tests/remote.rs
index f8818524680..65b1b7051fe 100644
--- a/crates/snapshot/tests/remote.rs
+++ b/crates/snapshot/tests/remote.rs
@@ -60,7 +60,7 @@ async fn can_sync_a_snapshot() -> anyhow::Result<()> {
     assert_eq!(stats.objects_written, total_objects);

     // Assert that the copied snapshot is valid.
-    let pool = PagePool::default();
+    let pool = PagePool::new_for_test();
     let dst_snapshot_full = dst_repo.read_snapshot(src.offset, &pool)?;
     Locking::restore_from_snapshot(dst_snapshot_full, pool)?;
diff --git a/crates/table/benches/page_manager.rs b/crates/table/benches/page_manager.rs
index 1f5592af01e..6391bf15bff 100644
--- a/crates/table/benches/page_manager.rs
+++ b/crates/table/benches/page_manager.rs
@@ -181,7 +181,7 @@ fn reserve_empty_page(c: &mut Criterion) {
     let mut group = c.benchmark_group("reserve_empty_page");
     group.throughput(Throughput::Bytes(PAGE_DATA_SIZE as _));
     group.bench_function("leave_uninit", |b| {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut pages = Pages::default();
         b.iter(|| {
             let _ = black_box(pages.reserve_empty_page(&pool, RESERVE_SIZE));
@@ -189,7 +189,7 @@ fn reserve_empty_page(c: &mut Criterion) {
     });

     let fill_with_zeros = |_, _, pages: &mut Pages| {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let page = pages.reserve_empty_page(&pool, RESERVE_SIZE).unwrap();
         let page = pages.get_page_mut(page);
         unsafe { page.zero_data() };
@@ -222,7 +222,7 @@ fn insert_one_page_fixed_len(c: &mut Criterion) {
         rows_per_page::<R>() as u64 * mem::size_of::<R>() as u64,
     ));
     group.bench_function(name, |b| {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut pages = Pages::default();
         // `0xa5` is the alternating bit pattern, which makes incorrect accesses obvious.
         insert_one_page_worth_fixed_len(&pool, &mut pages, visitor, &R::from_u64(0xa5a5a5a5_a5a5a5a5));
@@ -283,7 +283,7 @@ fn delete_one_page_fixed_len(c: &mut Criterion) {
         };
         iter_time_with(
             b,
-            &mut (Pages::default(), PagePool::default()),
+            &mut (Pages::default(), PagePool::new_for_test()),
             pre,
             |ptrs, _, (pages, _)| {
                 for ptr in ptrs {
@@ -315,7 +315,7 @@ fn retrieve_one_page_fixed_len(c: &mut Criterion) {
     group.throughput(Throughput::Bytes(rows_per_page as u64 * mem::size_of::<R>() as u64));

     group.bench_function(name, |b| {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut pages = Pages::default();

         let ptrs = fill_page_with_fixed_len_collect_row_pointers(
@@ -367,7 +367,7 @@ fn insert_with_holes_fixed_len(c: &mut Criterion) {
         group.throughput(Throughput::Bytes(num_to_delete_in_bytes as u64));

         group.bench_function(delete_ratio.to_string(), |b| {
-            let pool = PagePool::default();
+            let pool = PagePool::new_for_test();
             let mut pages = Pages::default();
             let mut rng = StdRng::seed_from_u64(0xa5a5a5a5_a5a5a5a5);
@@ -437,7 +437,7 @@ fn copy_filter_fixed_len(c: &mut Criterion) {
     let val = R::from_u64(0xdeadbeef_0badbeef);
     for keep_ratio in [0.1, 0.25, 0.5, 0.75, 0.9, 1.0] {
         let visitor = &NullVarLenVisitor;
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut pages = Pages::default();

         let num_pages = 16;
@@ -537,7 +537,7 @@ fn table_insert_one_row(c: &mut Criterion) {
         let val = black_box(val.to_product());

         // Insert before benching to alloc and fault in a page.
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut ctx = (table, NullBlobStore);
         let ptr = ctx.0.insert(&pool, &mut ctx.1, &val).unwrap().1.pointer();
         let pre = |_, (table, bs): &mut (Table, NullBlobStore)| {
@@ -588,7 +588,7 @@ fn table_delete_one_row(c: &mut Criterion) {
         let val = val.to_product();

         // Insert before benching to alloc and fault in a page.
-        let mut ctx = (table, NullBlobStore, PagePool::default());
+        let mut ctx = (table, NullBlobStore, PagePool::new_for_test());
         let insert = |_: u64, (table, bs, pool): &mut (Table, NullBlobStore, PagePool)| {
             table.insert(pool, bs, &val).unwrap().1.pointer()
         };
@@ -637,7 +637,7 @@ fn table_extract_one_row(c: &mut Criterion) {
         let mut table = make_table_for_row_type::<R>(name);
         let val = val.to_product();

-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut blob_store = NullBlobStore;
         let row = black_box(table.insert(&pool, &mut blob_store, &val).unwrap().1);
         group.bench_function(name, |b| {
@@ -848,7 +848,7 @@ fn index_insert(c: &mut Criterion) {
         same_ratio: f64,
     ) {
         let make_row_move = &mut make_row;
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let (tbl, index_id, num_same, _) =
             make_table_with_same_ratio::<R>(&pool, make_row_move, num_rows, same_ratio, false);
         let mut ctx = (tbl, NullBlobStore, pool);
@@ -905,7 +905,7 @@ fn index_seek(c: &mut Criterion) {
     ) {
         let make_row_move = &mut make_row;
         let (tbl, index_id, num_same, num_diff) =
-            make_table_with_same_ratio::<R>(&PagePool::default(), make_row_move, num_rows, same_ratio, unique);
+            make_table_with_same_ratio::<R>(&PagePool::new_for_test(), make_row_move, num_rows, same_ratio, unique);

         group.bench_with_input(
             bench_id_for_index(name, num_rows, same_ratio, num_same, unique),
@@ -972,7 +972,7 @@ fn index_delete(c: &mut Criterion) {
         same_ratio: f64,
     ) {
         let make_row_move = &mut make_row;
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let (mut tbl, index_id, num_same, _) =
             make_table_with_same_ratio::<R>(&pool, make_row_move, num_rows, same_ratio, false);
diff --git a/crates/table/src/eq.rs b/crates/table/src/eq.rs
index 3c198a3b9b6..4dc054cbb67 100644
--- a/crates/table/src/eq.rs
+++ b/crates/table/src/eq.rs
@@ -241,7 +241,7 @@ mod test {
             AlgebraicType::product([AlgebraicType::U8, AlgebraicType::U32]), // xpppxxxx
         ])]);

-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let bs = &mut NullBlobStore;
         let mut table_a = crate::table::test::table(ty.clone());
         let mut table_b = crate::table::test::table(ty);
diff --git a/crates/table/src/eq_to_pv.rs b/crates/table/src/eq_to_pv.rs
index 04b3e79ea12..47c676f4175 100644
--- a/crates/table/src/eq_to_pv.rs
+++ b/crates/table/src/eq_to_pv.rs
@@ -237,7 +237,7 @@ mod tests {
         // Turn `val` into a `RowRef`.
         let mut table = crate::table::test::table(ty);
         let blob_store = &mut HashMapBlobStore::default();
-        let (_, row) = table.insert(&PagePool::default(), blob_store, &val).unwrap();
+        let (_, row) = table.insert(&PagePool::new_for_test(), blob_store, &val).unwrap();

         // Check eq algo.
         prop_assert_eq!(row, val);
diff --git a/crates/table/src/page.rs b/crates/table/src/page.rs
index 72a097ff70e..fb15955b9db 100644
--- a/crates/table/src/page.rs
+++ b/crates/table/src/page.rs
@@ -2493,7 +2493,7 @@ pub(crate) mod tests {

     #[test]
     fn serde_round_trip_whole_page() {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut page = Page::new(u64_row_size());

         // Construct an empty page, ser/de it, and assert that it's still empty.
diff --git a/crates/table/src/page_pool.rs b/crates/table/src/page_pool.rs
index 239067643aa..56f9a938898 100644
--- a/crates/table/src/page_pool.rs
+++ b/crates/table/src/page_pool.rs
@@ -22,20 +22,26 @@ impl MemoryUsage for PagePool {
     }
 }

-/// The default page pool has a size of 8 GiB.
-impl Default for PagePool {
-    fn default() -> Self {
-        Self::new(None)
+impl PagePool {
+    pub fn new_for_test() -> Self {
+        Self::new(Some(100 * size_of::<Page>()))
     }
-}

-impl PagePool {
     /// Returns a new page pool with `max_size` bytes rounded down to the nearest multiple of 64 KiB.
     ///
-    /// if no size is provided, a default of 8 GiB is used.
+    /// If no size is provided, a default of one page is used.
     pub fn new(max_size: Option<usize>) -> Self {
-        const DEFAULT_MAX_SIZE: usize = 8 * (1 << 30); // 8 GiB
-        const PAGE_SIZE: usize = 64 * (1 << 10); // 64 KiB, `size_of::<Page>()`
+        const PAGE_SIZE: usize = size_of::<Page>();
+        // TODO(centril): This effectively disables the page pool.
+        // Currently, we have a test `test_index_scans`.
+        // The test sets up a `Location` table, like in BitCraft, with a `chunk` field,
+        // and populates it with 1000 different chunks of 1200 rows each.
+        // It then asserts that the cold latency of an index scan on `chunk` is < 1 ms.
+        // However, for reasons currently unknown to us,
+        // a large page pool, with capacity `1 << 26` bytes, on an i7-7700K with 64 GB RAM,
+        // turns that latency into 30-40 ms.
+        // As a precaution, we disable the page pool by default.
+        const DEFAULT_MAX_SIZE: usize = PAGE_SIZE; // 1 page

         let queue_size = max_size.unwrap_or(DEFAULT_MAX_SIZE) / PAGE_SIZE;
         let inner = Arc::new(PagePoolInner::new(queue_size));
@@ -163,18 +169,6 @@ impl MemoryUsage for PagePoolInner {
     }
 }

-impl Default for PagePoolInner {
-    fn default() -> Self {
-        const MAX_PAGE_MEM: usize = 8 * (1 << 30); // 8 GiB
-
-        // 2 ^ 17 pages at most.
-        // Each slot in the pool is `(AtomicCell, Box)` which takes up 16 bytes.
-        // The pool will therefore have a fixed cost of 2^20 bytes, i.e., 2 MiB.
-        const MAX_POOLED_PAGES: usize = MAX_PAGE_MEM / size_of::<Page>();
-        Self::new(MAX_POOLED_PAGES)
-    }
-}
-
 #[inline]
 fn inc(atomic: &AtomicUsize) {
     atomic.fetch_add(1, Ordering::Relaxed);
@@ -246,7 +240,7 @@ mod tests {

     #[test]
     fn page_pool_returns_same_page() {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         assert_metrics(&pool, 0, 0, 0, 0);

         // Create a page and put it back.
diff --git a/crates/table/src/read_column.rs b/crates/table/src/read_column.rs
index 7f0cec6b3d7..424993b723b 100644
--- a/crates/table/src/read_column.rs
+++ b/crates/table/src/read_column.rs
@@ -382,7 +382,7 @@ mod test {
         /// inserting the row, then doing `AlgebraicValue::read_column` on each column of the row
         /// returns the expected value.
         fn read_column_same_value((ty, val) in generate_typed_row()) {
-            let pool = PagePool::default();
+            let pool = PagePool::new_for_test();
             let mut blob_store = HashMapBlobStore::default();
             let mut table = table(ty);
@@ -399,7 +399,7 @@ mod test {
         /// which does not match the actual column type
         /// returns an appropriate error.
         fn read_column_wrong_type((ty, val) in generate_typed_row()) {
-            let pool = PagePool::default();
+            let pool = PagePool::new_for_test();
             let mut blob_store = HashMapBlobStore::default();
             let mut table = table(ty.clone());
@@ -430,7 +430,7 @@ mod test {
         /// i.e. with an out-of-bounds index,
         /// returns an appropriate error.
         fn read_column_out_of_bounds((ty, val) in generate_typed_row()) {
-            let pool = PagePool::default();
+            let pool = PagePool::new_for_test();
             let mut blob_store = HashMapBlobStore::default();
             let mut table = table(ty.clone());
@@ -488,7 +488,7 @@ mod test {
         ($name:ident { $algebraic_type:expr => $rust_type:ty = $val:expr }) => {
             #[test]
             fn $name() {
-                let pool = PagePool::default();
+                let pool = PagePool::new_for_test();
                 let mut blob_store = HashMapBlobStore::default();
                 let mut table = table(ProductType::from_iter([$algebraic_type]));
@@ -550,7 +550,7 @@ mod test {
     fn read_sum_tag_from_sum_with_payload() {
         let algebraic_type = AlgebraicType::sum([("a", AlgebraicType::U8), ("b", AlgebraicType::U16)]);

-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut blob_store = HashMapBlobStore::default();
         let mut table = table(ProductType::from([algebraic_type]));
diff --git a/crates/table/src/row_hash.rs b/crates/table/src/row_hash.rs
index 3c536cc01f0..b29b04808fc 100644
--- a/crates/table/src/row_hash.rs
+++ b/crates/table/src/row_hash.rs
@@ -235,7 +235,7 @@ mod tests {
         fn pv_row_ref_hash_same_std_random_state((ty, val) in generate_typed_row()) {
             // Turn `val` into a `RowRef`.
             let mut table = crate::table::test::table(ty);
-            let pool = &PagePool::default();
+            let pool = &PagePool::new_for_test();
             let blob_store = &mut HashMapBlobStore::default();
             let (_, row) = table.insert(pool, blob_store, &val).unwrap();
@@ -247,7 +247,7 @@ mod tests {
         #[test]
         fn pv_row_ref_hash_same_ahash((ty, val) in generate_typed_row()) {
             // Turn `val` into a `RowRef`.
-            let pool = &PagePool::default();
+            let pool = &PagePool::new_for_test();
             let blob_store = &mut HashMapBlobStore::default();
             let mut table = crate::table::test::table(ty);
             let (_, row) = table.insert(pool, blob_store, &val).unwrap();
diff --git a/crates/table/src/static_layout.rs b/crates/table/src/static_layout.rs
index e1a5d6e7a34..c0070a19e35 100644
--- a/crates/table/src/static_layout.rs
+++ b/crates/table/src/static_layout.rs
@@ -654,7 +654,7 @@ mod test {

     #[test]
     fn known_bsatn_same_as_bflatn_from((ty, val) in generate_typed_row()) {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut blob_store = HashMapBlobStore::default();
         let mut table = crate::table::test::table(ty);
         let Some(static_layout) = table.static_layout().cloned() else {
@@ -683,7 +683,7 @@ mod test {

     #[test]
     fn known_bflatn_same_as_pv_from((ty, val) in generate_typed_row()) {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut blob_store = HashMapBlobStore::default();
         let mut table = crate::table::test::table(ty);
         let Some(static_layout) = table.static_layout().cloned() else {
diff --git a/crates/table/src/table.rs b/crates/table/src/table.rs
index c2b00c9cae5..5287b82f5c3 100644
--- a/crates/table/src/table.rs
+++ b/crates/table/src/table.rs
@@ -1997,7 +1997,7 @@ pub(crate) mod test {
         let index_schema = schema.indexes[0].clone();

         let mut table = Table::new(schema.into(), SquashedOffset::COMMITTED_STATE);
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let cols = ColList::new(0.into());

         let algo = BTreeAlgorithm { columns: cols.clone() }.into();
@@ -2042,7 +2042,7 @@ pub(crate) mod test {
     fn insert_retrieve_body(ty: impl Into<ProductType>, val: impl Into<ProductValue>) -> TestCaseResult {
         let val = val.into();

-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut blob_store = HashMapBlobStore::default();
         let mut table = table(ty.into());
         let (hash, row) = table.insert(&pool, &mut blob_store, &val).unwrap();
@@ -2104,7 +2104,7 @@ pub(crate) mod test {
         vals: Vec<ProductValue>,
         indexed_columns: ColList,
     ) -> Result<(), TestCaseError> {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut blob_store = HashMapBlobStore::default();
         let mut table = table(ty.clone());
@@ -2178,7 +2178,7 @@ pub(crate) mod test {

     #[test]
     fn insert_delete_removed_from_pointer_map((ty, val) in generate_typed_row()) {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut blob_store = HashMapBlobStore::default();
         let mut table = table(ty);
         let (hash, row) = table.insert(&pool, &mut blob_store, &val).unwrap();
@@ -2210,7 +2210,7 @@ pub(crate) mod test {

     #[test]
     fn insert_duplicate_set_semantic((ty, val) in generate_typed_row()) {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut blob_store = HashMapBlobStore::default();
         let mut table = table(ty);
@@ -2246,7 +2246,7 @@ pub(crate) mod test {

     #[test]
     fn insert_bsatn_same_as_pv((ty, val) in generate_typed_row()) {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut bs_pv = HashMapBlobStore::default();
         let mut table_pv = table(ty.clone());
         let res_pv = table_pv.insert(&pool, &mut bs_pv, &val);
@@ -2262,7 +2262,7 @@ pub(crate) mod test {

     #[test]
     fn row_size_reporting_matches_slow_implementations((ty, vals) in generate_typed_row_vec(128, 2048)) {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut blob_store = HashMapBlobStore::default();
         let mut table = table(ty.clone());
@@ -2304,7 +2304,7 @@ pub(crate) mod test {

     // Optimistically insert the `row` before checking any constraints
     // under the assumption that errors (unique constraint & set semantic violations) are rare.
-    let pool = PagePool::default();
+    let pool = PagePool::new_for_test();
     let (row_ref, blob_bytes) = table.insert_physically_bsatn(&pool, blob_store, row)?;
     let row_ptr = row_ref.pointer();
@@ -2319,7 +2319,7 @@ pub(crate) mod test {
     // Compare `scan_rows` against a simpler implementation.
     #[test]
     fn table_scan_iter_eq_flatmap() {
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut blob_store = HashMapBlobStore::default();
         let mut table = table(AlgebraicType::U64.into());
         for v in 0..2u64.pow(14) {
@@ -2346,7 +2346,7 @@ pub(crate) mod test {
         let pt = AlgebraicType::U64.into();
         let pv = product![42u64];
         let mut table = table(pt);
-        let pool = &PagePool::default();
+        let pool = &PagePool::new_for_test();
         let blob_store = &mut NullBlobStore;

         let (_, row_ref) = table.insert(pool, blob_store, &pv).unwrap();
@@ -2362,7 +2362,7 @@ pub(crate) mod test {
     #[test]
     fn test_blob_store_bytes() {
         let pt: ProductType = [AlgebraicType::String, AlgebraicType::I32].into();
-        let pool = &PagePool::default();
+        let pool = &PagePool::new_for_test();
         let blob_store = &mut HashMapBlobStore::default();
         let mut insert = |table: &mut Table, string, num| {
             table
diff --git a/crates/table/src/table_index/mod.rs b/crates/table/src/table_index/mod.rs
index 1fd96ea4497..6d08223e7da 100644
--- a/crates/table/src/table_index/mod.rs
+++ b/crates/table/src/table_index/mod.rs
@@ -1302,7 +1302,7 @@ mod test {
         fn remove_nonexistent_noop(((ty, cols, pv), is_unique) in (gen_row_and_cols(), any::<bool>())) {
             let mut index = new_index(&ty, &cols, is_unique);
             let mut table = table(ty);
-            let pool = PagePool::default();
+            let pool = PagePool::new_for_test();
             let mut blob_store = HashMapBlobStore::default();
             let row_ref = table.insert(&pool, &mut blob_store, &pv).unwrap().1;
             prop_assert_eq!(index.delete(row_ref).unwrap(), false);
@@ -1313,7 +1313,7 @@ mod test {
         fn insert_delete_noop(((ty, cols, pv), is_unique) in (gen_row_and_cols(), any::<bool>())) {
             let mut index = new_index(&ty, &cols, is_unique);
             let mut table = table(ty);
-            let pool = PagePool::default();
+            let pool = PagePool::new_for_test();
             let mut blob_store = HashMapBlobStore::default();
             let row_ref = table.insert(&pool, &mut blob_store, &pv).unwrap().1;
             let value = get_fields(&cols, &pv);
@@ -1334,7 +1334,7 @@ mod test {
         fn insert_again_violates_unique_constraint((ty, cols, pv) in gen_row_and_cols()) {
             let mut index = new_index(&ty, &cols, true);
             let mut table = table(ty);
-            let pool = PagePool::default();
+            let pool = PagePool::new_for_test();
             let mut blob_store = HashMapBlobStore::default();
             let row_ref = table.insert(&pool, &mut blob_store, &pv).unwrap().1;
             let value = get_fields(&cols, &pv);
@@ -1370,7 +1370,7 @@ mod test {
         let ty = ProductType::from_iter([AlgebraicType::U64]);
         let mut index = new_index(&ty, &cols, true);
         let mut table = table(ty);
-        let pool = PagePool::default();
+        let pool = PagePool::new_for_test();
         let mut blob_store = HashMapBlobStore::default();

         let prev = needle - 1;
diff --git a/crates/testing/src/modules.rs b/crates/testing/src/modules.rs
index 8696f663c02..07701ba853e 100644
--- a/crates/testing/src/modules.rs
+++ b/crates/testing/src/modules.rs
@@ -238,7 +238,11 @@ pub static DEFAULT_CONFIG: Config = Config {
 /// For performance tests, do not persist to disk.
 pub static IN_MEMORY_CONFIG: Config = Config {
     storage: Storage::Disk,
-    page_pool_max_size: None,
+    // For some reason, a large page pool capacity causes `test_index_scans` to slow down
+    // and pushes the perf test for `chunk` over 1 ms.
+    // The threshold for failure on an i7-7700K with 64 GB RAM appears to be around `1 << 26` bytes.
+    // TODO(centril): investigate further why this size affects the benchmark.
+    page_pool_max_size: Some(1 << 16),
 };

 /// Used to parse output from module logs.
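
Not part of the patch above: a minimal usage sketch of the two constructors this change leaves in place, based only on the signatures visible in this diff. The import path `spacetimedb_table::page_pool::PagePool` is an assumption inferred from the file paths; adjust to wherever `PagePool` is actually exported.

// Hypothetical sketch; the module path below is assumed, not confirmed by the patch.
use spacetimedb_table::page_pool::PagePool;

fn main() {
    // Production-style construction, as `host_controller.rs` now does explicitly.
    // After this patch, `None` falls back to a deliberately tiny one-page capacity.
    let prod_pool = PagePool::new(None);

    // Test/bench construction via the new bounded constructor,
    // capped at 100 pages (`100 * size_of::<Page>()` bytes).
    let test_pool = PagePool::new_for_test();

    let _ = (prod_pool, test_pool);
}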