//===-- primary_test.cpp ----------------------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "tests/scudo_unit_test.h" #include "primary32.h" #include "primary64.h" #include "size_class_map.h" #include #include #include #include #include // Note that with small enough regions, the SizeClassAllocator64 also works on // 32-bit architectures. It's not something we want to encourage, but we still // should ensure the tests pass. struct TestConfig1 { static const scudo::uptr PrimaryRegionSizeLog = 18U; static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN; static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX; static const bool MaySupportMemoryTagging = false; typedef scudo::uptr PrimaryCompactPtrT; static const scudo::uptr PrimaryCompactPtrScale = 0; static const bool PrimaryEnableRandomOffset = true; static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18; }; struct TestConfig2 { #if defined(__mips__) // Unable to allocate greater size on QEMU-user. static const scudo::uptr PrimaryRegionSizeLog = 23U; #else static const scudo::uptr PrimaryRegionSizeLog = 24U; #endif static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN; static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX; static const bool MaySupportMemoryTagging = false; typedef scudo::uptr PrimaryCompactPtrT; static const scudo::uptr PrimaryCompactPtrScale = 0; static const bool PrimaryEnableRandomOffset = true; static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18; }; struct TestConfig3 { #if defined(__mips__) // Unable to allocate greater size on QEMU-user. 
static const scudo::uptr PrimaryRegionSizeLog = 23U; #else static const scudo::uptr PrimaryRegionSizeLog = 24U; #endif static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN; static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX; static const bool MaySupportMemoryTagging = true; typedef scudo::uptr PrimaryCompactPtrT; static const scudo::uptr PrimaryCompactPtrScale = 0; static const bool PrimaryEnableRandomOffset = true; static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18; }; template struct Config : public BaseConfig { using SizeClassMap = SizeClassMapT; }; template struct SizeClassAllocator : public scudo::SizeClassAllocator64> {}; template struct SizeClassAllocator : public scudo::SizeClassAllocator32> {}; template struct TestAllocator : public SizeClassAllocator { ~TestAllocator() { this->unmapTestOnly(); } void *operator new(size_t size) { void *p = nullptr; EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size)); return p; } void operator delete(void *ptr) { free(ptr); } }; template struct ScudoPrimaryTest : public Test {}; #if SCUDO_FUCHSIA #define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \ SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2) \ SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3) #else #define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \ SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig1) \ SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2) \ SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3) #endif #define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE) \ using FIXTURE##NAME##_##TYPE = FIXTURE##NAME; \ TEST_F(FIXTURE##NAME##_##TYPE, NAME) { Run(); } #define SCUDO_TYPED_TEST(FIXTURE, NAME) \ template \ struct FIXTURE##NAME : public FIXTURE { \ void Run(); \ }; \ SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \ template void FIXTURE##NAME::Run() SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) { using Primary = TestAllocator; std::unique_ptr Allocator(new Primary); Allocator->init(/*ReleaseToOsInterval=*/-1); typename 
Primary::CacheT Cache; Cache.init(nullptr, Allocator.get()); const scudo::uptr NumberOfAllocations = 32U; for (scudo::uptr I = 0; I <= 16U; I++) { const scudo::uptr Size = 1UL << I; if (!Primary::canAllocate(Size)) continue; const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size); void *Pointers[NumberOfAllocations]; for (scudo::uptr J = 0; J < NumberOfAllocations; J++) { void *P = Cache.allocate(ClassId); memset(P, 'B', Size); Pointers[J] = P; } for (scudo::uptr J = 0; J < NumberOfAllocations; J++) Cache.deallocate(ClassId, Pointers[J]); } Cache.destroy(nullptr); Allocator->releaseToOS(); scudo::ScopedString Str; Allocator->getStats(&Str); Str.output(); } struct SmallRegionsConfig { using SizeClassMap = scudo::DefaultSizeClassMap; static const scudo::uptr PrimaryRegionSizeLog = 20U; static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN; static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX; static const bool MaySupportMemoryTagging = false; typedef scudo::uptr PrimaryCompactPtrT; static const scudo::uptr PrimaryCompactPtrScale = 0; static const bool PrimaryEnableRandomOffset = true; static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18; }; // The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes. // For the 32-bit one, it requires actually exhausting memory, so we skip it. 
// Keep popping batches from the largest size class until the primary refuses:
// with SmallRegionsConfig's tiny regions this must eventually fail, which is
// exactly the condition under test.
TEST(ScudoPrimaryTest, Primary64OOM) {
  using Primary = scudo::SizeClassAllocator64<SmallRegionsConfig>;
  using TransferBatch = Primary::CacheT::TransferBatch;
  Primary Allocator;
  Allocator.init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  scudo::GlobalStats Stats;
  Stats.init();
  Cache.init(&Stats, &Allocator);
  bool AllocationFailed = false;
  std::vector<TransferBatch *> Batches;
  const scudo::uptr ClassId = Primary::SizeClassMap::LargestClassId;
  const scudo::uptr Size = Primary::getSizeByClassId(ClassId);
  for (scudo::uptr I = 0; I < 10000U; I++) {
    TransferBatch *B = Allocator.popBatch(&Cache, ClassId);
    if (!B) {
      AllocationFailed = true;
      break;
    }
    // Touch every block in the batch so the pages are actually committed.
    for (scudo::u32 J = 0; J < B->getCount(); J++)
      memset(Allocator.decompactPtr(ClassId, B->get(J)), 'B', Size);
    Batches.push_back(B);
  }
  while (!Batches.empty()) {
    Allocator.pushBatch(ClassId, Batches.back());
    Batches.pop_back();
  }
  Cache.destroy(nullptr);
  Allocator.releaseToOS();
  scudo::ScopedString Str;
  Allocator.getStats(&Str);
  Str.output();
  EXPECT_EQ(AllocationFailed, true);
  Allocator.unmapTestOnly();
}

// Allocate a batch of random-sized blocks, then verify that iterateOverBlocks
// visits every single one of them.
SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryIterate) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  std::vector<std::pair<scudo::uptr, void *>> V;
  for (scudo::uptr I = 0; I < 64U; I++) {
    const scudo::uptr Size = std::rand() % Primary::SizeClassMap::MaxSize;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *P = Cache.allocate(ClassId);
    V.push_back(std::make_pair(ClassId, P));
  }
  scudo::uptr Found = 0;
  auto Lambda = [V, &Found](scudo::uptr Block) {
    for (const auto &Pair : V) {
      if (Pair.second == reinterpret_cast<void *>(Block))
        Found++;
    }
  };
  // Iteration requires the allocator to be disabled (locked).
  Allocator->disable();
  Allocator->iterateOverBlocks(Lambda);
  Allocator->enable();
  EXPECT_EQ(Found, V.size());
  while (!V.empty()) {
    auto Pair = V.back();
    Cache.deallocate(Pair.first, Pair.second);
    V.pop_back();
  }
  Cache.destroy(nullptr);
  Allocator->releaseToOS();
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

// Hammer the primary from 32 threads at once, each with its own thread-local
// cache, all released simultaneously via a condition variable.
SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
  using Primary = TestAllocator<TypeParam, scudo::SvelteSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      static thread_local typename Primary::CacheT Cache;
      Cache.init(nullptr, Allocator.get());
      std::vector<std::pair<scudo::uptr, void *>> V;
      {
        // Block until the main thread flips Ready, so that all the threads
        // start allocating at roughly the same time.
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size =
            std::rand() % Primary::SizeClassMap::MaxSize / 4;
        const scudo::uptr ClassId =
            Primary::SizeClassMap::getClassIdBySize(Size);
        void *P = Cache.allocate(ClassId);
        if (P)
          V.push_back(std::make_pair(ClassId, P));
      }
      while (!V.empty()) {
        auto Pair = V.back();
        Cache.deallocate(Pair.first, Pair.second);
        V.pop_back();
      }
      Cache.destroy(nullptr);
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS();
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

// Through a simple allocation that spans two pages, verify that releaseToOS
// actually releases some bytes (at least one page worth). This is a regression
// test for an error in how the release criteria were computed.
SCUDO_TYPED_TEST(ScudoPrimaryTest, ReleaseToOS) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr Size = scudo::getPageSizeCached() * 2;
  EXPECT_TRUE(Primary::canAllocate(Size));
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
  void *P = Cache.allocate(ClassId);
  EXPECT_NE(P, nullptr);
  Cache.deallocate(ClassId, P);
  Cache.destroy(nullptr);
  EXPECT_GT(Allocator->releaseToOS(), 0U);
}