diff --git a/pw_allocator/bump_allocator.cc b/pw_allocator/bump_allocator.cc index 114e4301d..407f104e5 100644 --- a/pw_allocator/bump_allocator.cc +++ b/pw_allocator/bump_allocator.cc @@ -25,11 +25,13 @@ void BumpAllocator::Init(ByteSpan region) { } void* BumpAllocator::DoAllocate(Layout layout) { + size_t remaining = remaining_.size(); ByteSpan region = GetAlignedSubspan(remaining_, layout.alignment()); if (region.size() < layout.size()) { return nullptr; } remaining_ = region.subspan(layout.size()); + allocated_ += remaining - remaining_.size(); return region.data(); } diff --git a/pw_allocator/fallback_allocator.cc b/pw_allocator/fallback_allocator.cc index b86f11e38..34ad0aba3 100644 --- a/pw_allocator/fallback_allocator.cc +++ b/pw_allocator/fallback_allocator.cc @@ -46,6 +46,10 @@ bool FallbackAllocator::DoResize(void* ptr, size_t new_size) { : secondary_.Resize(ptr, new_size); } +size_t FallbackAllocator::DoGetAllocated() const { + return primary_.GetAllocated() + secondary_.GetAllocated(); +} + Result<Layout> FallbackAllocator::DoGetInfo(InfoType info_type, const void* ptr) const { Result<Layout> primary = GetInfo(primary_, info_type, ptr); diff --git a/pw_allocator/public/pw_allocator/allocator.h b/pw_allocator/public/pw_allocator/allocator.h index 14d0a5322..adb724ea1 100644 --- a/pw_allocator/public/pw_allocator/allocator.h +++ b/pw_allocator/public/pw_allocator/allocator.h @@ -160,6 +160,10 @@ class Allocator : public Deallocator { return DoReallocate(ptr, old_layout, new_size); } + /// Returns the total bytes that have been allocated by this allocator, or + /// `size_t(-1)` if this allocator does not track its total allocated bytes. + size_t GetAllocated() const { return DoGetAllocated(); } + protected: /// TODO(b/326509341): Remove when downstream consumers migrate. constexpr Allocator() = default; @@ -203,6 +207,12 @@ class Allocator : public Deallocator { /// Do not use this method. It will be removed. 
/// TODO(b/326509341): Remove when downstream consumers migrate. virtual void* DoReallocate(void* ptr, Layout old_layout, size_t new_size); + + /// Virtual `GetAllocated` function that can be overridden by derived classes. + /// + /// The default implementation simply returns `size_t(-1)`, indicating that + /// tracking total allocated bytes is not supported. + virtual size_t DoGetAllocated() const { return size_t(-1); } }; namespace allocator { diff --git a/pw_allocator/public/pw_allocator/block_allocator.h b/pw_allocator/public/pw_allocator/block_allocator.h index 8c2b8d7d0..ec90b782b 100644 --- a/pw_allocator/public/pw_allocator/block_allocator.h +++ b/pw_allocator/public/pw_allocator/block_allocator.h @@ -17,6 +17,7 @@ #include "pw_allocator/allocator.h" #include "pw_allocator/block/detailed_block.h" +#include "pw_allocator/block/result.h" #include "pw_allocator/capability.h" #include "pw_allocator/fragmentation.h" #include "pw_assert/assert.h" @@ -186,6 +187,9 @@ class BlockAllocator : public internal::GenericBlockAllocator { /// @copydoc Allocator::Resize bool DoResize(void* ptr, size_t new_size) override; + /// @copydoc Allocator::GetAllocated + size_t DoGetAllocated() const override { return allocated_; } + /// @copydoc Deallocator::GetInfo Result<Layout> DoGetInfo(InfoType info_type, const void* ptr) const override; @@ -233,6 +237,7 @@ class BlockAllocator : public internal::GenericBlockAllocator { // Represents the range of blocks for this allocator. size_t capacity_ = 0; + size_t allocated_ = 0; BlockType* first_ = nullptr; BlockType* last_ = nullptr; uint16_t unpoisoned_ = 0; @@ -303,10 +308,19 @@ void* BlockAllocator<BlockType>::DoAllocate(Layout layout) { return nullptr; } BlockType* block = result.block(); - - // New free blocks may be created when allocating. 
- if (result.prev() == BlockResultPrev::kSplitNew) { - RecycleBlock(block->Prev()); + allocated_ += block->OuterSize(); + switch (result.prev()) { + case BlockResultPrev::kSplitNew: + // New free blocks may be created when allocating. + RecycleBlock(block->Prev()); + break; + case BlockResultPrev::kResizedLarger: + // Extra bytes may be appended to the previous block. + allocated_ += result.size(); + break; + case BlockResultPrev::kUnchanged: + case BlockResultPrev::kResizedSmaller: + break; } if (result.next() == BlockResultNext::kSplitNew) { RecycleBlock(block->Next()); @@ -337,10 +351,16 @@ void BlockAllocator<BlockType>::DoDeallocate(void* ptr) { } // Free the block and merge it with its neighbors, if possible. + allocated_ -= block->OuterSize(); auto free_result = BlockType::Free(std::move(block)); block = free_result.block(); UpdateLast(block); + if (free_result.prev() == BlockResultPrev::kResizedSmaller) { + // Bytes were reclaimed from the previous block. + allocated_ -= free_result.size(); + } + if constexpr (kPoisonInterval != 0) { ++unpoisoned_; if (unpoisoned_ >= kPoisonInterval) { @@ -366,9 +386,12 @@ bool BlockAllocator<BlockType>::DoResize(void* ptr, ReserveBlock(block->Next()); } + size_t old_size = block->OuterSize(); if (!block->Resize(new_size).ok()) { return false; } + allocated_ -= old_size; + allocated_ += block->OuterSize(); UpdateLast(block); if (auto* next = block->Next(); next != nullptr && next->IsFree()) { diff --git a/pw_allocator/public/pw_allocator/bump_allocator.h b/pw_allocator/public/pw_allocator/bump_allocator.h index 6687b9054..5552651d1 100644 --- a/pw_allocator/public/pw_allocator/bump_allocator.h +++ b/pw_allocator/public/pw_allocator/bump_allocator.h @@ -13,6 +13,8 @@ // the License. 
#pragma once +#include + #include "pw_allocator/allocator.h" #include "pw_allocator/capability.h" #include "pw_bytes/span.h" @@ -130,9 +132,13 @@ class BumpAllocator : public Allocator { /// @copydoc Allocator::Deallocate void DoDeallocate(void*) override; + /// @copydoc Allocator::GetAllocated + size_t DoGetAllocated() const override { return allocated_; } + /// Frees any owned objects and discards remaining memory. void Reset(); + size_t allocated_ = 0; ByteSpan remaining_; internal::GenericOwned* owned_ = nullptr; }; diff --git a/pw_allocator/public/pw_allocator/fallback_allocator.h b/pw_allocator/public/pw_allocator/fallback_allocator.h index 4eb51f9ac..34cd43412 100644 --- a/pw_allocator/public/pw_allocator/fallback_allocator.h +++ b/pw_allocator/public/pw_allocator/fallback_allocator.h @@ -47,6 +47,9 @@ class FallbackAllocator : public Allocator { /// @copydoc Allocator::Resize bool DoResize(void* ptr, size_t new_size) override; + /// @copydoc Allocator::GetAllocated + size_t DoGetAllocated() const override; + /// @copydoc Deallocator::GetInfo Result DoGetInfo(InfoType info_type, const void* ptr) const override; diff --git a/pw_allocator/public/pw_allocator/synchronized_allocator.h b/pw_allocator/public/pw_allocator/synchronized_allocator.h index af5d8726a..79d3e4ca8 100644 --- a/pw_allocator/public/pw_allocator/synchronized_allocator.h +++ b/pw_allocator/public/pw_allocator/synchronized_allocator.h @@ -64,6 +64,12 @@ class SynchronizedAllocator : public Allocator { return allocator->Reallocate(ptr, new_layout); } + /// @copydoc Allocator::GetAllocated + size_t DoGetAllocated() const override { + Pointer allocator = borrowable_.acquire(); + return allocator->GetAllocated(); + } + /// @copydoc Deallocator::GetInfo Result DoGetInfo(InfoType info_type, const void* ptr) const override { Pointer allocator = borrowable_.acquire(); diff --git a/pw_allocator/public/pw_allocator/testing.h b/pw_allocator/public/pw_allocator/testing.h index 0b3e8a67f..f8f9a82a9 100644 
--- a/pw_allocator/public/pw_allocator/testing.h +++ b/pw_allocator/public/pw_allocator/testing.h @@ -157,6 +157,9 @@ class AllocatorForTest : public Allocator { return tracker_.Resize(ptr, new_size); } + /// @copydoc Allocator::GetAllocated + size_t DoGetAllocated() const override { return tracker_.GetAllocated(); } + /// @copydoc Deallocator::GetInfo Result<Layout> DoGetInfo(InfoType info_type, const void* ptr) const override { return GetInfo(tracker_, info_type, ptr); diff --git a/pw_allocator/public/pw_allocator/tracking_allocator.h b/pw_allocator/public/pw_allocator/tracking_allocator.h index 546533b9e..fcc359c17 100644 --- a/pw_allocator/public/pw_allocator/tracking_allocator.h +++ b/pw_allocator/public/pw_allocator/tracking_allocator.h @@ -80,6 +80,9 @@ class TrackingAllocator : public Allocator { /// @copydoc Allocator::Reallocate void* DoReallocate(void* ptr, Layout new_layout) override; + /// @copydoc Allocator::GetAllocated + size_t DoGetAllocated() const override { return allocator_.GetAllocated(); } + /// @copydoc Deallocator::GetInfo Result<Layout> DoGetInfo(InfoType info_type, const void* ptr) const override { return GetInfo(allocator_, info_type, ptr); @@ -94,67 +97,81 @@ template <typename MetricsType> void* TrackingAllocator<MetricsType>::DoAllocate(Layout layout) { Layout requested = layout; + size_t allocated = allocator_.GetAllocated(); void* new_ptr = allocator_.Allocate(requested); if (new_ptr == nullptr) { metrics_.RecordFailure(requested.size()); return nullptr; } - Layout allocated = Layout::Unwrap(GetAllocatedLayout(new_ptr)); metrics_.IncrementAllocations(); metrics_.ModifyRequested(requested.size(), 0); - metrics_.ModifyAllocated(allocated.size(), 0); + metrics_.ModifyAllocated(allocator_.GetAllocated(), allocated); return new_ptr; } template <typename MetricsType> void TrackingAllocator<MetricsType>::DoDeallocate(void* ptr) { Layout requested = Layout::Unwrap(GetRequestedLayout(ptr)); - Layout allocated = Layout::Unwrap(GetAllocatedLayout(ptr)); + size_t allocated = 
allocator_.GetAllocated(); allocator_.Deallocate(ptr); metrics_.IncrementDeallocations(); metrics_.ModifyRequested(0, requested.size()); - metrics_.ModifyAllocated(0, allocated.size()); + metrics_.ModifyAllocated(allocator_.GetAllocated(), allocated); } template <typename MetricsType> bool TrackingAllocator<MetricsType>::DoResize(void* ptr, size_t new_size) { Layout requested = Layout::Unwrap(GetRequestedLayout(ptr)); - Layout allocated = Layout::Unwrap(GetAllocatedLayout(ptr)); + size_t allocated = allocator_.GetAllocated(); Layout new_requested(new_size, requested.alignment()); if (!allocator_.Resize(ptr, new_requested.size())) { metrics_.RecordFailure(new_size); return false; } - Layout new_allocated = Layout::Unwrap(GetAllocatedLayout(ptr)); metrics_.IncrementResizes(); metrics_.ModifyRequested(new_requested.size(), requested.size()); - metrics_.ModifyAllocated(new_allocated.size(), allocated.size()); + metrics_.ModifyAllocated(allocator_.GetAllocated(), allocated); return true; } template <typename MetricsType> void* TrackingAllocator<MetricsType>::DoReallocate(void* ptr, Layout new_layout) { + // Check if possible to resize in place with no additional overhead. Layout requested = Layout::Unwrap(GetRequestedLayout(ptr)); - Layout allocated = Layout::Unwrap(GetAllocatedLayout(ptr)); + size_t allocated = allocator_.GetAllocated(); Layout new_requested(new_layout.size(), requested.alignment()); - void* new_ptr = allocator_.Reallocate(ptr, new_requested); + if (allocator_.Resize(ptr, new_layout.size())) { + metrics_.IncrementReallocations(); + metrics_.ModifyRequested(new_requested.size(), requested.size()); + metrics_.ModifyAllocated(allocator_.GetAllocated(), allocated); + return ptr; + } + + // Need to move data to a brand new allocation. + // In order to properly record the peak allocation, this method needs to + // perform the steps of allocating, copying, and deallocating memory, and + // recording metrics in the interim steps. 
+ Result<Layout> old_layout = GetUsableLayout(ptr); + if (!old_layout.ok()) { + metrics_.RecordFailure(new_layout.size()); + return nullptr; + } + void* new_ptr = allocator_.Allocate(new_layout); if (new_ptr == nullptr) { - metrics_.RecordFailure(new_requested.size()); + metrics_.RecordFailure(new_layout.size()); return nullptr; } + // Update with transient allocation to ensure peak metrics are correct. + size_t transient_allocated = allocator_.GetAllocated(); + metrics_.ModifyAllocated(transient_allocated, allocated); + if (ptr != nullptr) { + std::memcpy(new_ptr, ptr, std::min(new_layout.size(), old_layout->size())); + allocator_.Deallocate(ptr); + } metrics_.IncrementReallocations(); metrics_.ModifyRequested(new_requested.size(), requested.size()); - Layout new_allocated = Layout::Unwrap(GetAllocatedLayout(new_ptr)); - if (ptr != new_ptr) { - // Reallocate performed "alloc, copy, free". Increment and decrement - // seperately in order to ensure "peak" metrics are correct. - metrics_.ModifyAllocated(new_allocated.size(), 0); - metrics_.ModifyAllocated(0, allocated.size()); - } else { - // Reallocate performed "resize" without additional overhead. - metrics_.ModifyAllocated(new_allocated.size(), allocated.size()); - } + metrics_.ModifyAllocated(allocator_.GetAllocated(), transient_allocated); return new_ptr; } diff --git a/pw_allocator/tracking_allocator_test.cc b/pw_allocator/tracking_allocator_test.cc index c5596052e..e354a972f 100644 --- a/pw_allocator/tracking_allocator_test.cc +++ b/pw_allocator/tracking_allocator_test.cc @@ -382,4 +382,83 @@ TEST_F(TrackingAllocatorTest, ReallocateFailure) { EXPECT_METRICS_EQ(expected, metrics); } +TEST_F(TrackingAllocatorTest, CorrectlyAccountsForShiftedBytes) { + const TestMetrics& metrics = tracker_.metrics(); + ExpectedValues expected; + + // Find an alignment greater than two block headers. 
+ size_t alignment = 1; + while (alignment <= (BlockType::kBlockOverhead * 2)) { + alignment *= 2; + } + + // Allocate one block to align the usable space of the following block. + Layout layout1(alignment - BlockType::kBlockOverhead, alignment); + void* ptr1 = tracker_.Allocate(layout1); + ASSERT_NE(ptr1, nullptr); + auto* block1 = BlockType::FromUsableSpace(ptr1); + size_t ptr1_allocated = block1->OuterSize(); + expected.AddRequestedBytes(layout1.size()); + expected.AddAllocatedBytes(ptr1_allocated); + expected.num_allocations += 1; + EXPECT_METRICS_EQ(expected, metrics); + + // Allocate a second block that ends two block headers from an alignment + // boundary. + Layout layout2(alignment - (BlockType::kBlockOverhead * 2), alignment); + void* ptr2 = tracker_.Allocate(layout2); + ASSERT_NE(ptr2, nullptr); + auto* block2 = BlockType::FromUsableSpace(ptr2); + EXPECT_EQ(block2->InnerSize(), layout2.size()); + size_t ptr2_allocated = block2->OuterSize(); + expected.AddRequestedBytes(layout2.size()); + expected.AddAllocatedBytes(ptr2_allocated); + expected.num_allocations += 1; + EXPECT_METRICS_EQ(expected, metrics); + + // Allocate a third block to prevent the second block from being coalesced. + // The extra bytes should be appended to the second block. + Layout layout3(1, alignment); + void* ptr3 = tracker_.Allocate(layout3); + ASSERT_NE(ptr3, nullptr); + auto* block3 = BlockType::FromUsableSpace(ptr3); + size_t ptr3_allocated = block3->OuterSize(); + size_t shifted = block2->OuterSize() - ptr2_allocated; + expected.AddRequestedBytes(layout3.size()); + expected.AddAllocatedBytes(ptr3_allocated + shifted); + expected.num_allocations += 1; + EXPECT_METRICS_EQ(expected, metrics); + + // Free the second block, which is larger than when it was allocated. 
+ tracker_.Deallocate(ptr2); + expected.requested_bytes -= layout2.size(); + expected.allocated_bytes -= ptr2_allocated + shifted; + expected.num_deallocations += 1; + EXPECT_METRICS_EQ(expected, metrics); + + // Allocate the second block again. The trailing space should be appended. + ptr2 = tracker_.Allocate(layout2); + ASSERT_NE(ptr2, nullptr); + block2 = BlockType::FromUsableSpace(ptr2); + EXPECT_EQ(block2->OuterSize(), ptr2_allocated + shifted); + expected.AddRequestedBytes(layout2.size()); + expected.AddAllocatedBytes(ptr2_allocated + shifted); + expected.num_allocations += 1; + EXPECT_METRICS_EQ(expected, metrics); + + // Free the third block, which should reclaim space from the second block. + tracker_.Deallocate(ptr3); + expected.requested_bytes -= layout3.size(); + expected.allocated_bytes -= ptr3_allocated + shifted; + expected.num_deallocations += 1; + EXPECT_METRICS_EQ(expected, metrics); + + // Free the second block, which is now smaller than when it was allocated. + tracker_.Deallocate(ptr2); + expected.requested_bytes -= layout2.size(); + expected.allocated_bytes -= ptr2_allocated; + expected.num_deallocations += 1; + EXPECT_METRICS_EQ(expected, metrics); +} + } // namespace