
[scudo] Support the mode of disabling primary cache #121351

Open
wants to merge 1 commit into base: main

Conversation

ChiaHungDuan
Contributor

In this mode, no primary blocks will be cached except the batch class. It retains the same page-releasing strategy as SizeClassAllocatorLocalCache, so we expect the same page-release frequency in the primary allocator.

@llvmbot
Member

llvmbot commented Dec 30, 2024

@llvm/pr-subscribers-compiler-rt-sanitizer

Author: None (ChiaHungDuan)

Changes

In this mode, no primary blocks will be cached except the batch class. It retains the same page-releasing strategy as SizeClassAllocatorLocalCache, so we expect the same page-release frequency in the primary allocator.
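For context, a config opting into this mode might look like the following sketch, adapted from the TestNoCacheConfig this patch adds to combined_test.cpp (64-bit branch). Everything except the new EnableCache knob is an illustrative value, not a requirement, and the name MyNoCacheConfig is made up for this example.

// Sketch only, adapted from the TestNoCacheConfig added by this patch:
// a Primary config with EnableCache = false makes SizeClassAllocator64
// select NoCache<ThisT> as its CacheT. All values other than EnableCache
// are illustrative, not required.
struct MyNoCacheConfig {
  static const bool MaySupportMemoryTagging = true;
  template <class A>
  using TSDRegistryT = scudo::TSDRegistrySharedT<A, 8U, 4U>;

  struct Primary {
    using SizeClassMap = scudo::AndroidSizeClassMap;
    static const scudo::uptr RegionSizeLog = 28U;
    typedef scudo::u32 CompactPtrT;
    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const scudo::uptr GroupSizeLog = 20U;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
    // The knob this patch introduces: cache no primary blocks locally
    // (only batch-class blocks remain cached).
    static const bool EnableCache = false;
    static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
    static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
  };
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator64<Config>;

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };
  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};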


Full diff: https://github.com/llvm/llvm-project/pull/121351.diff

5 Files Affected:

  • (modified) compiler-rt/lib/scudo/standalone/allocator_config.def (+2)
  • (modified) compiler-rt/lib/scudo/standalone/local_cache.h (+142-5)
  • (modified) compiler-rt/lib/scudo/standalone/primary64.h (+3-1)
  • (modified) compiler-rt/lib/scudo/standalone/tests/combined_test.cpp (+43-1)
  • (modified) compiler-rt/lib/scudo/standalone/type_traits.h (+8)
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index ce37b1cfaedccc..7d50315b35daeb 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -78,6 +78,8 @@ PRIMARY_REQUIRED(const s32, MaxReleaseToOsIntervalMs)
 
 // PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT)
 //
+PRIMARY_OPTIONAL(const bool, EnableCache, true)
+
 // The scale of a compact pointer. E.g., Ptr = Base + (CompactPtr << Scale).
 PRIMARY_OPTIONAL(const uptr, CompactPtrScale, SCUDO_MIN_ALIGNMENT_LOG)
 
diff --git a/compiler-rt/lib/scudo/standalone/local_cache.h b/compiler-rt/lib/scudo/standalone/local_cache.h
index 46d6affdc033b1..b14a72cdaaa2fc 100644
--- a/compiler-rt/lib/scudo/standalone/local_cache.h
+++ b/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -161,11 +161,6 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
     }
   }
 
-  void destroyBatch(uptr ClassId, void *B) {
-    if (ClassId != BatchClassId)
-      deallocate(BatchClassId, B);
-  }
-
   NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
     const u16 NumBlocksRefilled =
         Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
@@ -184,6 +179,148 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
   }
 };
 
+template <class SizeClassAllocator> struct NoCache {
+  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
+  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
+
+  void init(GlobalStats *S, SizeClassAllocator *A) {
+    Stats.init();
+    if (LIKELY(S))
+      S->link(&Stats);
+    Allocator = A;
+    initCache();
+  }
+
+  void destroy(GlobalStats *S) {
+    if (LIKELY(S))
+      S->unlink(&Stats);
+  }
+
+  void *allocate(uptr ClassId) {
+    CompactPtrT CompactPtr;
+    uptr NumBlocksPopped = Allocator->popBlocks(this, ClassId, &CompactPtr, 1U);
+    if (NumBlocksPopped == 0)
+      return nullptr;
+    DCHECK_EQ(NumBlocksPopped, 1U);
+    const PerClass *C = &PerClassArray[ClassId];
+    Stats.add(StatAllocated, C->ClassSize);
+    Stats.sub(StatFree, C->ClassSize);
+    return Allocator->decompactPtr(ClassId, CompactPtr);
+  }
+
+  bool deallocate(uptr ClassId, void *P) {
+    CHECK_LT(ClassId, NumClasses);
+
+    if (ClassId == BatchClassId)
+      return deallocateBatchClassBlock(P);
+
+    CompactPtrT CompactPtr =
+        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
+    Allocator->pushBlocks(this, ClassId, &CompactPtr, 1U);
+    PerClass *C = &PerClassArray[ClassId];
+    Stats.sub(StatAllocated, C->ClassSize);
+    Stats.add(StatFree, C->ClassSize);
+
+    // The following adopts the same strategy of allocator draining as
+    // SizeClassAllocatorLocalCache so that they have the same hint for doing
+    // page release.
+    ++C->Count;
+    const bool SuggestDraining = C->Count == C->MaxCount;
+    if (SuggestDraining)
+      C->Count = 0;
+    return SuggestDraining;
+  }
+
+  void *getBatchClassBlock() {
+    PerClass *C = &PerClassArray[BatchClassId];
+    if (C->Count == 0) {
+      const u16 NumBlocksRefilled = Allocator->popBlocks(
+          this, BatchClassId, BatchClassStorage, C->MaxCount);
+      if (NumBlocksRefilled == 0)
+        reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
+      DCHECK_LE(NumBlocksRefilled, SizeClassMap::MaxNumCachedHint);
+      C->Count = NumBlocksRefilled;
+    }
+
+    const uptr ClassSize = C->ClassSize;
+    CompactPtrT CompactP = BatchClassStorage[--C->Count];
+    Stats.add(StatAllocated, ClassSize);
+    Stats.sub(StatFree, ClassSize);
+
+    return Allocator->decompactPtr(BatchClassId, CompactP);
+  }
+
+  LocalStats &getStats() { return Stats; }
+
+  void getStats(ScopedString *Str) { Str->append("    No block is cached.\n"); }
+
+  bool isEmpty() const {
+    const PerClass *C = &PerClassArray[BatchClassId];
+    return C->Count == 0;
+  }
+  void drain() {
+    PerClass *C = &PerClassArray[BatchClassId];
+    if (C->Count > 0) {
+      Allocator->pushBlocks(this, BatchClassId, BatchClassStorage, C->Count);
+      C->Count = 0;
+    }
+  }
+
+  static u16 getMaxCached(uptr Size) {
+    return Min(SizeClassMap::MaxNumCachedHint,
+               SizeClassMap::getMaxCachedHint(Size));
+  }
+
+private:
+  static const uptr NumClasses = SizeClassMap::NumClasses;
+  static const uptr BatchClassId = SizeClassMap::BatchClassId;
+  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
+    u16 Count = 0;
+    u16 MaxCount;
+    // Note: ClassSize is zero for the transfer batch.
+    uptr ClassSize;
+  };
+  PerClass PerClassArray[NumClasses] = {};
+  // Popping BatchClass blocks requires taking a certain amount of blocks at
+  // once. This restriction comes from how we manage the storing of BatchClass
+  // in the primary allocator. See more details in `popBlocksImpl` in the
+  // primary allocator.
+  CompactPtrT BatchClassStorage[SizeClassMap::MaxNumCachedHint];
+  LocalStats Stats;
+  SizeClassAllocator *Allocator = nullptr;
+
+  bool deallocateBatchClassBlock(void *P) {
+    PerClass *C = &PerClassArray[BatchClassId];
+    // Drain all the blocks.
+    if (C->Count == C->MaxCount) {
+      Allocator->pushBlocks(this, BatchClassId, BatchClassStorage, C->Count);
+      C->Count = 0;
+    }
+    BatchClassStorage[C->Count++] =
+        Allocator->compactPtr(BatchClassId, reinterpret_cast<uptr>(P));
+
+    // Currently, BatchClass doesn't support page releasing, so we always return
+    // false.
+    return false;
+  }
+
+  NOINLINE void initCache() {
+    for (uptr I = 0; I < NumClasses; I++) {
+      PerClass *P = &PerClassArray[I];
+      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
+      if (I != BatchClassId) {
+        P->ClassSize = Size;
+        P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
+      } else {
+        // ClassSize in this struct is only used for malloc/free stats, which
+        // should only track user allocations, not internal movements.
+        P->ClassSize = 0;
+        P->MaxCount = SizeClassMap::MaxNumCachedHint;
+      }
+    }
+  }
+};
+
 } // namespace scudo
 
 #endif // SCUDO_LOCAL_CACHE_H_
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 2b520ceb331488..f01293487271a5 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -57,9 +57,11 @@ template <typename Config> class SizeClassAllocator64 {
                 "Group size shouldn't be greater than the region size");
   static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
   typedef SizeClassAllocator64<Config> ThisT;
-  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
   typedef TransferBatch<ThisT> TransferBatchT;
   typedef BatchGroup<ThisT> BatchGroupT;
+  using CacheT = typename Conditional<Config::getEnableCache(),
+                                      SizeClassAllocatorLocalCache<ThisT>,
+                                      NoCache<ThisT>>::type;
 
 // BatchClass is used to store internal metadata so it needs to be at least as
   // large as the largest data structure.
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index ff98eb3397ee0e..4b920ba090a031 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -210,6 +210,47 @@ struct TestConditionVariableConfig {
   };
   template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
 };
+
+struct TestNoCacheConfig {
+  static const bool MaySupportMemoryTagging = true;
+  template <class A>
+  using TSDRegistryT =
+      scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
+
+  struct Primary {
+    using SizeClassMap = scudo::AndroidSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+    static const scudo::uptr RegionSizeLog = 28U;
+    typedef scudo::u32 CompactPtrT;
+    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+    static const scudo::uptr GroupSizeLog = 20U;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+#else
+    static const scudo::uptr RegionSizeLog = 18U;
+    static const scudo::uptr GroupSizeLog = 18U;
+    typedef scudo::uptr CompactPtrT;
+#endif
+    static const bool EnableCache = false;
+    static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
+  };
+
+#if SCUDO_CAN_USE_PRIMARY64
+  template <typename Config>
+  using PrimaryT = scudo::SizeClassAllocator64<Config>;
+#else
+  template <typename Config>
+  using PrimaryT = scudo::SizeClassAllocator32<Config>;
+#endif
+
+  struct Secondary {
+    template <typename Config>
+    using CacheT = scudo::MapAllocatorNoCache<Config>;
+  };
+  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+
 } // namespace scudo
 
 #if SCUDO_FUCHSIA
@@ -219,7 +260,8 @@ struct TestConditionVariableConfig {
 #define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
   SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                          \
   SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)                          \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)
+  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)            \
+  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestNoCacheConfig)
 #endif
 
 #define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
diff --git a/compiler-rt/lib/scudo/standalone/type_traits.h b/compiler-rt/lib/scudo/standalone/type_traits.h
index 16ed5a048f82bc..1c36a83ade02fd 100644
--- a/compiler-rt/lib/scudo/standalone/type_traits.h
+++ b/compiler-rt/lib/scudo/standalone/type_traits.h
@@ -42,6 +42,14 @@ template <typename T> struct isPointer<T *> {
   static constexpr bool value = true;
 };
 
+template <bool Cond, typename L, typename R> struct Conditional {
+  using type = L;
+};
+
+template <typename L, typename R> struct Conditional<false, L, R> {
+  using type = R;
+};
+
 } // namespace scudo
 
 #endif // SCUDO_TYPE_TRAITS_H_
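
For reference, here is a standalone illustration of how the Conditional trait added in type_traits.h drives the CacheT selection in primary64.h. The Conditional template is copied verbatim from the diff; FastCache and NoOpCache are placeholders standing in for SizeClassAllocatorLocalCache<ThisT> and NoCache<ThisT>.

#include <type_traits>

// Conditional trait, verbatim from the patch: picks L when Cond is true,
// R (via the partial specialization) when Cond is false.
template <bool Cond, typename L, typename R> struct Conditional {
  using type = L;
};
template <typename L, typename R> struct Conditional<false, L, R> {
  using type = R;
};

struct FastCache {}; // stands in for SizeClassAllocatorLocalCache<ThisT>
struct NoOpCache {}; // stands in for NoCache<ThisT>

// Mirrors the primary64.h change: the cache type is chosen at compile time
// from the config's EnableCache flag.
template <bool EnableCache>
using CacheT = typename Conditional<EnableCache, FastCache, NoOpCache>::type;

static_assert(std::is_same_v<CacheT<true>, FastCache>);
static_assert(std::is_same_v<CacheT<false>, NoOpCache>);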
