@@ -180,6 +180,14 @@ template <typename T> class NonZeroLengthArray<T, 0> {
180180
181181template <typename Config> class MapAllocatorCache {
182182public:
183+ typedef enum { COMMITTED = 0 , DECOMMITTED = 1 , NONE } EntryListT;
184+
185+ // TODO: Refactor the intrusive list to support non-pointer link type
186+ typedef struct {
187+ u16 Head;
188+ u16 Tail;
189+ } ListInfo;
190+
183191 void getStats (ScopedString *Str) {
184192 ScopedLock L (Mutex);
185193 uptr Integral;
@@ -197,13 +205,18 @@ template <typename Config> class MapAllocatorCache {
197205 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
198206 Str->append (" Cache Entry Info (Most Recent -> Least Recent):\n " );
199207
200- for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next ) {
201- CachedBlock &Entry = Entries[I];
202- Str->append (" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
203- " BlockSize: %zu %s\n " ,
204- Entry.CommitBase , Entry.CommitBase + Entry.CommitSize ,
205- Entry.CommitSize , Entry.Time == 0 ? " [R]" : " " );
206- }
208+ auto printList = [&](EntryListT ListType) REQUIRES (Mutex) {
209+ for (u32 I = EntryLists[ListType].Head ; I != CachedBlock::InvalidEntry;
210+ I = Entries[I].Next ) {
211+ CachedBlock &Entry = Entries[I];
212+ Str->append (" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
213+ " BlockSize: %zu %s\n " ,
214+ Entry.CommitBase , Entry.CommitBase + Entry.CommitSize ,
215+ Entry.CommitSize , Entry.Time == 0 ? " [R]" : " " );
216+ }
217+ };
218+ printList (COMMITTED);
219+ printList (DECOMMITTED);
207220 }
208221
209222 // Ensure the default maximum specified fits the array.
@@ -227,8 +240,10 @@ template <typename Config> class MapAllocatorCache {
227240 setOption (Option::ReleaseInterval, static_cast <sptr>(ReleaseToOsInterval));
228241
229242 // The cache is initially empty
230- LRUHead = CachedBlock::InvalidEntry;
231- LRUTail = CachedBlock::InvalidEntry;
243+ EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
244+ EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
245+ EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
246+ EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
232247
233248 // Available entries will be retrieved starting from the beginning of the
234249 // Entries array
@@ -309,16 +324,22 @@ template <typename Config> class MapAllocatorCache {
309324
310325 // All excess entries are evicted from the cache
311326 while (needToEvict ()) {
327+ EntryListT EvictionListType;
328+ if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
329+ EvictionListType = COMMITTED;
330+ else
331+ EvictionListType = DECOMMITTED;
312332 // Save MemMaps of evicted entries to perform unmap outside of lock
313- EvictionMemMaps.push_back (Entries[LRUTail].MemMap );
314- remove (LRUTail);
333+ EvictionMemMaps.push_back (
334+ Entries[EntryLists[EvictionListType].Tail ].MemMap );
335+ remove (EntryLists[EvictionListType].Tail , EvictionListType);
315336 }
316337
317- insert (Entry);
338+ insert (Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
318339
319340 if (OldestTime == 0 )
320341 OldestTime = Entry.Time ;
321- } while (0 );
342+ } while (0 ); // ScopedLock L(Mutex);
322343
323344 for (MemMapT &EvictMemMap : EvictionMemMaps)
324345 EvictMemMap.unmap (EvictMemMap.getBase (), EvictMemMap.getCapacity ());
@@ -335,17 +356,14 @@ template <typename Config> class MapAllocatorCache {
335356 // 10% of the requested size proved to be the optimal choice for
336357 // retrieving cached blocks after testing several options.
337358 constexpr u32 FragmentedBytesDivisor = 10 ;
338- bool Found = false ;
339359 CachedBlock Entry;
340360 uptr EntryHeaderPos = 0 ;
341- {
342- ScopedLock L (Mutex);
343- CallsToRetrieve++;
344- if (EntriesCount == 0 )
345- return false ;
346- u32 OptimalFitIndex = 0 ;
347- uptr MinDiff = UINTPTR_MAX;
348- for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
361+ uptr OptimalFitIndex = CachedBlock::InvalidEntry;
362+ uptr MinDiff = UINTPTR_MAX;
363+ EntryListT OptimalFitListType = NONE;
364+
365+ auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES (Mutex) {
366+ for (uptr I = EntryLists[ListType].Head ; I != CachedBlock::InvalidEntry;
349367 I = Entries[I].Next ) {
350368 const uptr CommitBase = Entries[I].CommitBase ;
351369 const uptr CommitSize = Entries[I].CommitSize ;
@@ -355,36 +373,48 @@ template <typename Config> class MapAllocatorCache {
355373 if (HeaderPos > CommitBase + CommitSize)
356374 continue ;
357375 if (HeaderPos < CommitBase ||
358- AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
376+ AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
359377 continue ;
360- }
361- Found = true ;
378+
362379 const uptr Diff = HeaderPos - CommitBase;
363- // immediately use a cached block if it's size is close enough to the
364- // requested size.
364+ // immediately use a cached block if its size is close enough to
381+ // the requested size.
365382 const uptr MaxAllowedFragmentedBytes =
366383 (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
367384 if (Diff <= MaxAllowedFragmentedBytes) {
368385 OptimalFitIndex = I;
369386 EntryHeaderPos = HeaderPos;
370- break ;
387+ OptimalFitListType = ListType;
388+ return true ;
371389 }
390+
372391 // keep track of the smallest cached block
373392 // that is greater than (AllocSize + HeaderSize)
374393 if (Diff > MinDiff)
375394 continue ;
376395 OptimalFitIndex = I;
377396 MinDiff = Diff;
397+ OptimalFitListType = ListType;
378398 EntryHeaderPos = HeaderPos;
379399 }
380- if (Found) {
381- Entry = Entries[OptimalFitIndex];
382- remove (OptimalFitIndex);
383- SuccessfulRetrieves++;
384- }
385- }
386- if (!Found)
387- return false ;
400+ return (OptimalFitIndex != CachedBlock::InvalidEntry);
401+ };
402+
403+ {
404+ ScopedLock L (Mutex);
405+ CallsToRetrieve++;
406+ if (EntriesCount == 0 )
407+ return false ;
408+
409+ // Prioritize valid fit from COMMITTED entries over
410+ // optimal fit from DECOMMITTED entries
411+ if (!FindAvailableEntry (COMMITTED) && !FindAvailableEntry (DECOMMITTED))
412+ return false ;
413+
414+ Entry = Entries[OptimalFitIndex];
415+ remove (OptimalFitIndex, OptimalFitListType);
416+ SuccessfulRetrieves++;
417+ } // ScopedLock L(Mutex);
388418
389419 *H = reinterpret_cast <LargeBlock::Header *>(
390420 LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
@@ -448,10 +478,15 @@ template <typename Config> class MapAllocatorCache {
448478 Quarantine[I].invalidate ();
449479 }
450480 }
451- for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next ) {
452- Entries[I].MemMap .setMemoryPermission (Entries[I].CommitBase ,
453- Entries[I].CommitSize , 0 );
454- }
481+ auto disableLists = [&](EntryListT EntryList) REQUIRES (Mutex) {
482+ for (u32 I = EntryLists[EntryList].Head ; I != CachedBlock::InvalidEntry;
483+ I = Entries[I].Next ) {
484+ Entries[I].MemMap .setMemoryPermission (Entries[I].CommitBase ,
485+ Entries[I].CommitSize , 0 );
486+ }
487+ };
488+ disableLists (COMMITTED);
489+ disableLists (DECOMMITTED);
455490 QuarantinePos = -1U ;
456491 }
457492
@@ -466,7 +501,7 @@ template <typename Config> class MapAllocatorCache {
466501 return (EntriesCount >= atomic_load_relaxed (&MaxEntriesCount));
467502 }
468503
469- void insert (const CachedBlock &Entry) REQUIRES(Mutex) {
504+ void insert (const CachedBlock &Entry, EntryListT ListType ) REQUIRES(Mutex) {
470505 DCHECK_LT (EntriesCount, atomic_load_relaxed (&MaxEntriesCount));
471506
472507 // Cache should be populated with valid entries when not empty
@@ -475,66 +510,86 @@ template <typename Config> class MapAllocatorCache {
475510 u32 FreeIndex = AvailableHead;
476511 AvailableHead = Entries[AvailableHead].Next ;
477512
478- if (EntriesCount == 0 ) {
479- LRUTail = static_cast <u16 >(FreeIndex);
480- } else {
481- // Check list order
482- if (EntriesCount > 1 )
483- DCHECK_GE (Entries[LRUHead].Time , Entries[Entries[LRUHead].Next ].Time );
484- Entries[LRUHead].Prev = static_cast <u16 >(FreeIndex);
485- }
486-
487513 Entries[FreeIndex] = Entry;
488- Entries[FreeIndex].Next = LRUHead;
489- Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
490- LRUHead = static_cast <u16 >(FreeIndex);
514+ pushFront (FreeIndex, ListType);
491515 EntriesCount++;
492516
517+ if (Entries[EntryLists[ListType].Head ].Next != CachedBlock::InvalidEntry) {
518+ DCHECK_GE (Entries[EntryLists[ListType].Head ].Time ,
519+ Entries[Entries[EntryLists[ListType].Head ].Next ].Time );
520+ }
493521 // Availability stack should not have available entries when all entries
494522 // are in use
495523 if (EntriesCount == Config::getEntriesArraySize ())
496524 DCHECK_EQ (AvailableHead, CachedBlock::InvalidEntry);
497525 }
498526
499- void remove (uptr I) REQUIRES(Mutex) {
500- DCHECK (Entries[I].isValid ());
501-
502- Entries[I].invalidate ();
503-
504- if (I == LRUHead)
505- LRUHead = Entries[I].Next ;
527+ // Joins the entries adjacent to Entries[I], effectively
528+ // unlinking Entries[I] from the list
529+ void unlink (uptr I, EntryListT ListType) REQUIRES(Mutex) {
530+ if (I == EntryLists[ListType].Head )
531+ EntryLists[ListType].Head = Entries[I].Next ;
506532 else
507533 Entries[Entries[I].Prev ].Next = Entries[I].Next ;
508534
509- if (I == LRUTail )
510- LRUTail = Entries[I].Prev ;
535+ if (I == EntryLists[ListType].Tail )
536+ EntryLists[ListType].Tail = Entries[I].Prev ;
511537 else
512538 Entries[Entries[I].Next ].Prev = Entries[I].Prev ;
539+ }
513540
541+ // Invalidates Entries[I], removes Entries[I] from list, and pushes
542+ // Entries[I] onto the stack of available entries
543+ void remove (uptr I, EntryListT ListType) REQUIRES(Mutex) {
544+ DCHECK (Entries[I].isValid ());
545+
546+ Entries[I].invalidate ();
547+
548+ unlink (I, ListType);
514549 Entries[I].Next = AvailableHead;
515550 AvailableHead = static_cast <u16 >(I);
516551 EntriesCount--;
517552
518553 // Cache should not have valid entries when not empty
519554 if (EntriesCount == 0 ) {
520- DCHECK_EQ (LRUHead, CachedBlock::InvalidEntry);
521- DCHECK_EQ (LRUTail, CachedBlock::InvalidEntry);
555+ DCHECK_EQ (EntryLists[COMMITTED].Head , CachedBlock::InvalidEntry);
556+ DCHECK_EQ (EntryLists[COMMITTED].Tail , CachedBlock::InvalidEntry);
557+ DCHECK_EQ (EntryLists[DECOMMITTED].Head , CachedBlock::InvalidEntry);
558+ DCHECK_EQ (EntryLists[DECOMMITTED].Tail , CachedBlock::InvalidEntry);
522559 }
523560 }
524561
562+ inline void pushFront (uptr I, EntryListT ListType) REQUIRES(Mutex) {
563+ if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
564+ EntryLists[ListType].Tail = static_cast <u16 >(I);
565+ else
566+ Entries[EntryLists[ListType].Head ].Prev = static_cast <u16 >(I);
567+
568+ Entries[I].Next = EntryLists[ListType].Head ;
569+ Entries[I].Prev = CachedBlock::InvalidEntry;
570+ EntryLists[ListType].Head = static_cast <u16 >(I);
571+ }
572+
525573 void empty () {
526574 MemMapT MapInfo[Config::getEntriesArraySize ()];
527575 uptr N = 0 ;
528576 {
529577 ScopedLock L (Mutex);
530- for (uptr I = 0 ; I < Config::getEntriesArraySize (); I++) {
531- if (!Entries[I].isValid ())
532- continue ;
533- MapInfo[N] = Entries[I].MemMap ;
534- remove (I);
535- N++;
536- }
578+ auto emptyList = [&](EntryListT ListType) REQUIRES (Mutex) {
579+ for (uptr I = EntryLists[ListType].Head ;
580+ I != CachedBlock::InvalidEntry;) {
581+ uptr ToRemove = I;
582+ I = Entries[I].Next ;
583+ MapInfo[N] = Entries[ToRemove].MemMap ;
584+ remove (ToRemove, ListType);
585+ N++;
586+ }
587+ };
588+ emptyList (COMMITTED);
589+ emptyList (DECOMMITTED);
537590 EntriesCount = 0 ;
591+ for (uptr I = 0 ; I < Config::getEntriesArraySize (); I++)
592+ DCHECK (!Entries[I].isValid ());
538593 }
539594 for (uptr I = 0 ; I < N; I++) {
540595 MemMapT &MemMap = MapInfo[I];
@@ -561,8 +616,14 @@ template <typename Config> class MapAllocatorCache {
561616 OldestTime = 0 ;
562617 for (uptr I = 0 ; I < Config::getQuarantineSize (); I++)
563618 releaseIfOlderThan (Quarantine[I], Time);
564- for (uptr I = 0 ; I < Config::getEntriesArraySize (); I++)
619+ for (u16 I = EntryLists[COMMITTED].Head ; I != CachedBlock::InvalidEntry;
620+ I = Entries[I].Next ) {
621+ if (Entries[I].Time && Entries[I].Time <= Time) {
622+ unlink (I, COMMITTED);
623+ pushFront (I, DECOMMITTED);
624+ }
565625 releaseIfOlderThan (Entries[I], Time);
626+ }
566627 }
567628
568629 HybridMutex Mutex;
@@ -579,10 +640,12 @@ template <typename Config> class MapAllocatorCache {
579640 NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
580641 Quarantine GUARDED_BY (Mutex) = {};
581642
582- // The LRUHead of the cache is the most recently used cache entry
583- u16 LRUHead GUARDED_BY (Mutex) = 0;
584- // The LRUTail of the cache is the least recently used cache entry
585- u16 LRUTail GUARDED_BY (Mutex) = 0;
643+ // EntryLists stores the head and tail indices of all
644+ // lists being used to store valid cache entries.
645+ // Currently there are lists storing COMMITTED and DECOMMITTED entries.
646+ // COMMITTED entries have memory chunks that have not been released to the OS
647+ // DECOMMITTED entries have memory chunks that have been released to the OS
648+ ListInfo EntryLists[2 ] GUARDED_BY(Mutex) = {};
586649 // The AvailableHead is the top of the stack of available entries
587650 u16 AvailableHead GUARDED_BY (Mutex) = 0;
588651};
0 commit comments