When playing with seastar::memory::scoped_large_allocation_warning_threshold
for debugging large allocations triggered in ScyllaDB's sstable reader
path, I found it very awkward that if I set the threshold to X, the
warnings are only emitted at X+1. Threshold, by definition, is the
point at which you start to experience something, so let's make this
interface respect that definition.
Signed-off-by: Raphael S. Carvalho <raph...@scylladb.com>
---
src/core/memory.cc | 2 +-
tests/unit/alloc_test.cc | 17 +++++++++++++++++
2 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/src/core/memory.cc b/src/core/memory.cc
index b8c55974..97f5d507 100644
--- a/src/core/memory.cc
+++ b/src/core/memory.cc
@@ -779,7 +779,7 @@ cpu_pages::warn_large_allocation(size_t size) {
void
inline
cpu_pages::check_large_allocation(size_t size) {
- if (size > large_allocation_warning_threshold) {
+ if (size >= large_allocation_warning_threshold) {
warn_large_allocation(size);
}
}
diff --git a/tests/unit/alloc_test.cc b/tests/unit/alloc_test.cc
index b5141d75..fdce9ef4 100644
--- a/tests/unit/alloc_test.cc
+++ b/tests/unit/alloc_test.cc
@@ -195,3 +195,20 @@ SEASTAR_TEST_CASE(test_realloc_nullptr) {
return make_ready_future<>();
}
#endif
+
+SEASTAR_TEST_CASE(test_large_allocation_warning_off_by_one) {
+#ifndef SEASTAR_DEFAULT_ALLOCATOR
+ constexpr size_t large_alloc_threshold = 1024*1024;
+ seastar::memory::scoped_large_allocation_warning_threshold mtg(large_alloc_threshold);
+ BOOST_REQUIRE(seastar::memory::get_large_allocation_warning_threshold() == large_alloc_threshold);
+ auto old_large_allocs_count = memory::stats().large_allocations();
+ auto obj = (char*)malloc(large_alloc_threshold);
+ *obj = 'c'; // to prevent compiler from considering this a dead allocation and optimizing it out
+
+ // Verify large allocation was detected by allocator.
+ BOOST_REQUIRE(memory::stats().large_allocations() == old_large_allocs_count+1);
+
+ free(obj);
+#endif
+ return make_ready_future<>();
+}
--
2.37.2