Skip to content

Commit 5ebe971

Browse files
committed
Switch low level chunked_buffer checks to debug
The checks — in particular the one fetching `physical_bytes` — introduced a slowdown in `BM_iterate_with_scalar_at` because: - Previously the `bytes()` call could be inlined, and it can no longer be inlined because of the runtime polymorphism. - The non-inlined function calls take up a large share of the time. Removing the check in `operator[]` removes two function calls in release builds. This brings performance roughly in line with before the refactor.
1 parent d8998e6 commit 5ebe971

2 files changed

Lines changed: 20 additions & 21 deletions

File tree

cpp/arcticdb/column_store/chunked_buffer.hpp

Lines changed: 12 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -342,7 +342,7 @@ class ChunkedBufferImpl {
342342

343343
uint8_t* bytes_at(size_t pos_bytes, size_t required) {
344344
auto [block, pos, _] = block_and_offset(pos_bytes);
345-
util::check(
345+
debug::check<ErrorCode::E_ASSERTION_FAILURE>(
346346
pos + required <= block->physical_bytes(),
347347
"Block overflow, position {} is greater than block capacity {}",
348348
pos + required,
@@ -357,7 +357,7 @@ class ChunkedBufferImpl {
357357

358358
uint8_t& operator[](size_t pos_bytes) {
359359
auto [block, pos, _] = block_and_offset(pos_bytes);
360-
util::check(
360+
debug::check<ErrorCode::E_ASSERTION_FAILURE>(
361361
pos < block->physical_bytes(),
362362
"Block overflow, position {} is greater than block capacity {}",
363363
pos,
@@ -390,19 +390,16 @@ class ChunkedBufferImpl {
390390
[[nodiscard]] uint8_t* data() { return const_cast<uint8_t*>(const_cast<const ChunkedBufferImpl*>(this)->data()); }
391391

392392
void check_bytes(size_t pos_bytes, size_t required_bytes) const {
393-
if (pos_bytes + required_bytes > bytes()) {
394-
std::string err = fmt::format(
395-
"Cursor overflow in chunked_buffer ptr_cast, cannot read {} bytes from a buffer of size {} with "
396-
"cursor "
397-
"at {}, as it would require {} bytes. ",
398-
required_bytes,
399-
bytes(),
400-
pos_bytes,
401-
pos_bytes + required_bytes
402-
);
403-
ARCTICDB_DEBUG(log::storage(), err);
404-
throw std::invalid_argument(err);
405-
}
393+
debug::check<ErrorCode::E_ASSERTION_FAILURE>(
394+
pos_bytes + required_bytes <= bytes(),
395+
"Cursor overflow in chunked_buffer ptr_cast, cannot read {} bytes from a buffer of size {} with "
396+
"cursor "
397+
"at {}, as it would require {} bytes. ",
398+
required_bytes,
399+
bytes(),
400+
pos_bytes,
401+
pos_bytes + required_bytes
402+
);
406403
}
407404

408405
// Returns a vector of continuous buffers, each designated by a pointer and size

cpp/arcticdb/column_store/test/benchmark_memory_segment.cpp

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -114,8 +114,8 @@ static void BM_iterate_with_scalar_at(benchmark::State& state) {
114114
[[maybe_unused]] auto acc = 0ull;
115115
for (auto _ : state) {
116116
// Starting col from 1 to skip the index
117-
for (auto col=1; col<=num_cols; ++col) {
118-
for (auto row=0; row<num_rows; ++row) {
117+
for (auto col = 1; col <= num_cols; ++col) {
118+
for (auto row = 0; row < num_rows; ++row) {
119119
acc += segment.scalar_at<uint64_t>(row, col).value_or(0);
120120
}
121121
}
@@ -130,12 +130,14 @@ static void BM_iterate_with_iterator(benchmark::State& state) {
130130
[[maybe_unused]] auto acc = 0ull;
131131
for (auto _ : state) {
132132
// Starting col from 1 to skip the index
133-
for (auto col=1; col<=num_cols; ++col) {
133+
for (auto col = 1; col <= num_cols; ++col) {
134134
auto data = segment.column(col).data();
135135
using tdt = ScalarTagType<DataTypeTag<DataType::UINT64>>;
136-
std::for_each(data.cbegin<tdt, IteratorType::REGULAR, IteratorDensity::DENSE>(), data.cend<tdt, IteratorType::REGULAR, IteratorDensity::DENSE>(), [&](auto v) {
137-
acc += v;
138-
});
136+
std::for_each(
137+
data.cbegin<tdt, IteratorType::REGULAR, IteratorDensity::DENSE>(),
138+
data.cend<tdt, IteratorType::REGULAR, IteratorDensity::DENSE>(),
139+
[&](auto v) { acc += v; }
140+
);
139141
}
140142
}
141143
}

0 commit comments

Comments
 (0)