[scudo] Separated committed and decommitted entries. (#101409)
Initially, the LRU list stored all mapped entries with no distinction
between the committed (non-madvise()'d) entries and the decommitted
(madvise()'d) entries. Now these two types of entries are separated into
two lists, allowing future cache logic to branch depending on whether
entries are committed or decommitted. Furthermore, the retrieval
algorithm prioritizes committed entries over decommitted entries.
Specifically, committed entries that satisfy the MaxUnusedCachePages
requirement are retrieved before optimal-fit decommitted entries.
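
For orientation, here is a minimal sketch of the scheme, using `std::list`
in place of scudo's fixed-size intrusive arrays; the names (`TwoListCache`,
`Entry`) and the single size-based fit test are illustrative assumptions,
not scudo's actual types or checks:

```cpp
#include <cstddef>
#include <cstdint>
#include <list>
#include <optional>

// Hypothetical stand-in for a cached mapping; the real scudo CachedBlock
// also carries a MemMap handle, a timestamp, and header bookkeeping.
struct Entry {
  uintptr_t CommitBase = 0;
  size_t CommitSize = 0;
};

class TwoListCache {
public:
  // Sketch assumes Max >= 1.
  explicit TwoListCache(size_t Max) : MaxEntries(Max) {}

  // Insert at the MRU end of whichever list matches the entry's state,
  // evicting DECOMMITTED tails first, then COMMITTED tails, when full.
  void insert(const Entry &E, bool Committed) {
    while (CommittedList.size() + DecommittedList.size() >= MaxEntries) {
      std::list<Entry> &Victims =
          DecommittedList.empty() ? CommittedList : DecommittedList;
      Victims.pop_back(); // the real code also unmaps the evicted mapping
    }
    (Committed ? CommittedList : DecommittedList).push_front(E);
  }

  // Walk COMMITTED first: an acceptable committed block wins even when a
  // decommitted block would be a tighter fit.
  std::optional<Entry> retrieve(size_t Size) {
    for (std::list<Entry> *L : {&CommittedList, &DecommittedList}) {
      for (auto It = L->begin(); It != L->end(); ++It) {
        if (It->CommitSize >= Size) { // placeholder for scudo's fit checks
          Entry Found = *It;
          L->erase(It);
          return Found;
        }
      }
    }
    return std::nullopt;
  }

private:
  size_t MaxEntries;
  std::list<Entry> CommittedList;   // MRU at front, LRU at back
  std::list<Entry> DecommittedList; // madvise()'d, generally older entries
};
```

The trade-off mirrors the commit's stated priority: reusing still-committed
memory avoids re-committing pages, so a good-enough committed block is
preferred over a tighter decommitted fit.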

This commit addresses the compiler errors raised
[here](#100818 (comment)).
JoshuaMBa committed Aug 9, 2024
1 parent e711a0c commit 9f3ff8d
Showing 1 changed file with 144 additions and 76 deletions.
220 changes: 144 additions & 76 deletions compiler-rt/lib/scudo/standalone/secondary.h
@@ -184,6 +184,14 @@ template <typename T> class NonZeroLengthArray<T, 0> {
template <typename Config, void (*unmapCallBack)(MemMapT &) = unmap>
class MapAllocatorCache {
public:
typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;

// TODO: Refactor the intrusive list to support non-pointer link type
typedef struct {
u16 Head;
u16 Tail;
} ListInfo;

void getStats(ScopedString *Str) {
ScopedLock L(Mutex);
uptr Integral;
@@ -201,13 +209,18 @@ class MapAllocatorCache {
SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");

for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
CachedBlock &Entry = Entries[I];
Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
"BlockSize: %zu %s\n",
Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
}
auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
I = Entries[I].Next) {
CachedBlock &Entry = Entries[I];
Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
"BlockSize: %zu %s\n",
Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
}
};
printList(COMMITTED);
printList(DECOMMITTED);
}

// Ensure the default maximum specified fits the array.
@@ -231,8 +244,10 @@ class MapAllocatorCache {
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));

// The cache is initially empty
LRUHead = CachedBlock::InvalidEntry;
LRUTail = CachedBlock::InvalidEntry;
EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;

// Available entries will be retrieved starting from the beginning of the
// Entries array
@@ -250,7 +265,6 @@ class MapAllocatorCache {
const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
u64 Time;
CachedBlock Entry;

Entry.CommitBase = CommitBase;
Entry.CommitSize = CommitSize;
Entry.BlockBegin = BlockBegin;
@@ -312,18 +326,27 @@ class MapAllocatorCache {
Entry = PrevEntry;
}

// All excess entries are evicted from the cache
// All excess entries are evicted from the cache.
// DECOMMITTED entries, being older than the COMMITTED
// entries, are evicted first in least recently used (LRU)
// fashion, followed by the COMMITTED entries
while (needToEvict()) {
EntryListT EvictionListType;
if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
EvictionListType = COMMITTED;
else
EvictionListType = DECOMMITTED;
// Save MemMaps of evicted entries to perform unmap outside of lock
EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
remove(LRUTail);
EvictionMemMaps.push_back(
Entries[EntryLists[EvictionListType].Tail].MemMap);
remove(EntryLists[EvictionListType].Tail, EvictionListType);
}

insert(Entry);
insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);

if (OldestTime == 0)
OldestTime = Entry.Time;
} while (0);
} while (0); // ScopedLock L(Mutex);

for (MemMapT &EvictMemMap : EvictionMemMaps)
unmapCallBack(EvictMemMap);
@@ -340,17 +363,14 @@ class MapAllocatorCache {
// 10% of the requested size proved to be the optimal choice for
// retrieving cached blocks after testing several options.
constexpr u32 FragmentedBytesDivisor = 10;
bool Found = false;
CachedBlock Entry;
uptr OptimalFitIndex = CachedBlock::InvalidEntry;
uptr MinDiff = UINTPTR_MAX;
EntryListT OptimalFitListType = NONE;
EntryHeaderPos = 0;
{
ScopedLock L(Mutex);
CallsToRetrieve++;
if (EntriesCount == 0)
return {};
u32 OptimalFitIndex = 0;
uptr MinDiff = UINTPTR_MAX;
for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;

auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
I = Entries[I].Next) {
const uptr CommitBase = Entries[I].CommitBase;
const uptr CommitSize = Entries[I].CommitSize;
@@ -360,34 +380,48 @@ class MapAllocatorCache {
if (HeaderPos > CommitBase + CommitSize)
continue;
if (HeaderPos < CommitBase ||
AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
continue;
}
Found = true;

const uptr Diff = HeaderPos - CommitBase;
// immediately use a cached block if its size is close enough to the
// requested size.
// immediately use a cached block if its size is close enough to
// the requested size.
const uptr MaxAllowedFragmentedBytes =
(CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
if (Diff <= MaxAllowedFragmentedBytes) {
OptimalFitIndex = I;
EntryHeaderPos = HeaderPos;
break;
OptimalFitListType = ListType;
return true;
}

// keep track of the smallest cached block
// that is greater than (AllocSize + HeaderSize)
if (Diff > MinDiff)
continue;
OptimalFitIndex = I;
MinDiff = Diff;
OptimalFitListType = ListType;
EntryHeaderPos = HeaderPos;
}
if (Found) {
Entry = Entries[OptimalFitIndex];
remove(OptimalFitIndex);
SuccessfulRetrieves++;
}
}
return (OptimalFitIndex != CachedBlock::InvalidEntry);
};

{
ScopedLock L(Mutex);
CallsToRetrieve++;
if (EntriesCount == 0)
return {};

// Prioritize valid fit from COMMITTED entries over
// optimal fit from DECOMMITTED entries
if (!FindAvailableEntry(COMMITTED) && !FindAvailableEntry(DECOMMITTED))
return {};

Entry = Entries[OptimalFitIndex];
remove(OptimalFitIndex, OptimalFitListType);
SuccessfulRetrieves++;
} // ScopedLock L(Mutex);

return Entry;
}
@@ -432,10 +466,15 @@ class MapAllocatorCache {
Quarantine[I].invalidate();
}
}
for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
Entries[I].CommitSize, 0);
}
auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
for (u32 I = EntryLists[EntryList].Head; I != CachedBlock::InvalidEntry;
I = Entries[I].Next) {
Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
Entries[I].CommitSize, 0);
}
};
disableLists(COMMITTED);
disableLists(DECOMMITTED);
QuarantinePos = -1U;
}

@@ -450,7 +489,7 @@ class MapAllocatorCache {
return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
}

void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));

// Cache should be populated with valid entries when not empty
@@ -459,66 +498,86 @@ class MapAllocatorCache {
u32 FreeIndex = AvailableHead;
AvailableHead = Entries[AvailableHead].Next;

if (EntriesCount == 0) {
LRUTail = static_cast<u16>(FreeIndex);
} else {
// Check list order
if (EntriesCount > 1)
DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
}

Entries[FreeIndex] = Entry;
Entries[FreeIndex].Next = LRUHead;
Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
LRUHead = static_cast<u16>(FreeIndex);
pushFront(FreeIndex, ListType);
EntriesCount++;

if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
Entries[Entries[EntryLists[ListType].Head].Next].Time);
}
// Availability stack should not have available entries when all entries
// are in use
if (EntriesCount == Config::getEntriesArraySize())
DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
}

void remove(uptr I) REQUIRES(Mutex) {
DCHECK(Entries[I].isValid());

Entries[I].invalidate();

if (I == LRUHead)
LRUHead = Entries[I].Next;
// Joins the entries adjacent to Entries[I], effectively
// unlinking Entries[I] from the list
void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
if (I == EntryLists[ListType].Head)
EntryLists[ListType].Head = Entries[I].Next;
else
Entries[Entries[I].Prev].Next = Entries[I].Next;

if (I == LRUTail)
LRUTail = Entries[I].Prev;
if (I == EntryLists[ListType].Tail)
EntryLists[ListType].Tail = Entries[I].Prev;
else
Entries[Entries[I].Next].Prev = Entries[I].Prev;
}

// Invalidates Entries[I], removes Entries[I] from its list, and pushes
// Entries[I] onto the stack of available entries
void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
DCHECK(Entries[I].isValid());

Entries[I].invalidate();

unlink(I, ListType);
Entries[I].Next = AvailableHead;
AvailableHead = static_cast<u16>(I);
EntriesCount--;

// Cache should not have valid entries when empty
if (EntriesCount == 0) {
DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
}
}

inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
EntryLists[ListType].Tail = static_cast<u16>(I);
else
Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);

Entries[I].Next = EntryLists[ListType].Head;
Entries[I].Prev = CachedBlock::InvalidEntry;
EntryLists[ListType].Head = static_cast<u16>(I);
}

void empty() {
MemMapT MapInfo[Config::getEntriesArraySize()];
uptr N = 0;
{
ScopedLock L(Mutex);
for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
if (!Entries[I].isValid())
continue;
MapInfo[N] = Entries[I].MemMap;
remove(I);
N++;
}
auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
for (uptr I = EntryLists[ListType].Head;
I != CachedBlock::InvalidEntry;) {
uptr ToRemove = I;
I = Entries[I].Next;
MapInfo[N] = Entries[ToRemove].MemMap;
remove(ToRemove, ListType);
N++;
}
};
emptyList(COMMITTED);
emptyList(DECOMMITTED);
EntriesCount = 0;
for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
DCHECK(!Entries[I].isValid());
}
for (uptr I = 0; I < N; I++) {
MemMapT &MemMap = MapInfo[I];
@@ -545,8 +604,14 @@ class MapAllocatorCache {
OldestTime = 0;
for (uptr I = 0; I < Config::getQuarantineSize(); I++)
releaseIfOlderThan(Quarantine[I], Time);
for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
for (u16 I = EntryLists[COMMITTED].Head; I != CachedBlock::InvalidEntry;
I = Entries[I].Next) {
if (Entries[I].Time && Entries[I].Time <= Time) {
unlink(I, COMMITTED);
pushFront(I, DECOMMITTED);
}
releaseIfOlderThan(Entries[I], Time);
}
}

HybridMutex Mutex;
@@ -563,10 +628,12 @@ class MapAllocatorCache {
NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
Quarantine GUARDED_BY(Mutex) = {};

// The LRUHead of the cache is the most recently used cache entry
u16 LRUHead GUARDED_BY(Mutex) = 0;
// The LRUTail of the cache is the least recently used cache entry
u16 LRUTail GUARDED_BY(Mutex) = 0;
// EntryLists stores the head and tail indices of all
// lists being used to store valid cache entries.
// Currently there are lists storing COMMITTED and DECOMMITTED entries.
// COMMITTED entries have memory chunks that have not been released to the OS
// DECOMMITTED entries have memory chunks that have been released to the OS
ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
// The AvailableHead is the top of the stack of available entries
u16 AvailableHead GUARDED_BY(Mutex) = 0;
};
Expand Down Expand Up @@ -706,6 +773,7 @@ MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
}
return Ptr;
}

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger