diff --git a/AK/ByteBuffer.h b/AK/ByteBuffer.h
index 188e88c50e..f7f54239e7 100644
--- a/AK/ByteBuffer.h
+++ b/AK/ByteBuffer.h
@@ -289,6 +289,12 @@ private:
 
     NEVER_INLINE ErrorOr<void> try_ensure_capacity_slowpath(size_t new_capacity)
     {
+        // When we are asked to raise the capacity by very small amounts,
+        // the caller is perhaps appending very little data in many calls.
+        // To avoid copying the entire ByteBuffer every single time,
+        // we raise the capacity exponentially, by a factor of roughly 1.5.
+        // This is most noticeable in Lagom, where kmalloc_good_size is just a no-op.
+        new_capacity = max(new_capacity, (capacity() * 3) / 2);
         new_capacity = kmalloc_good_size(new_capacity);
         auto* new_buffer = (u8*)kmalloc(new_capacity);
         if (!new_buffer)
diff --git a/Tests/AK/TestByteBuffer.cpp b/Tests/AK/TestByteBuffer.cpp
index 010972430e..29ed85d32f 100644
--- a/Tests/AK/TestByteBuffer.cpp
+++ b/Tests/AK/TestByteBuffer.cpp
@@ -33,6 +33,14 @@ TEST_CASE(equality_operator)
     EXPECT_EQ(d == d, true);
 }
 
+BENCHMARK_CASE(append)
+{
+    ByteBuffer bb;
+    for (size_t i = 0; i < 1000000; ++i) {
+        bb.append(static_cast<u8>(i));
+    }
+}
+
 /*
  * FIXME: These `negative_*` tests should cause precisely one compilation error
  * each, and always for the specified reason. Currently we do not have a harness