diff --git a/cachelib/allocator/CacheAllocator.h b/cachelib/allocator/CacheAllocator.h index 612f6d2185..252316056c 100644 --- a/cachelib/allocator/CacheAllocator.h +++ b/cachelib/allocator/CacheAllocator.h @@ -1660,7 +1660,7 @@ class CacheAllocator : public CacheBase { // @return An evicted item or nullptr if there is no suitable candidate. Item* findEviction(PoolId pid, ClassId cid); - using EvictionIterator = typename MMContainer::Iterator; + using EvictionIterator = typename MMContainer::LockedIterator; // Advance the current iterator and try to evict a regular item // diff --git a/cachelib/allocator/MM2Q-inl.h b/cachelib/allocator/MM2Q-inl.h index d62b707179..ba388d40a4 100644 --- a/cachelib/allocator/MM2Q-inl.h +++ b/cachelib/allocator/MM2Q-inl.h @@ -241,29 +241,21 @@ bool MM2Q::Container::add(T& node) noexcept { } template T::*HookPtr> -typename MM2Q::Container::Iterator +typename MM2Q::Container::LockedIterator MM2Q::Container::getEvictionIterator() const noexcept { - // we cannot use combined critical sections with folly::DistributedMutex here - // because the lock is held for the lifetime of the eviction iterator. In - // other words, the abstraction of the iterator just does not lend itself well - // to combinable critical sections as the user can hold the lock for an - // arbitrary amount of time outside a lambda-friendly piece of code (eg. they - // can return the iterator from functions, pass it to functions, etc) - // - // it would be theoretically possible to refactor this interface into - // something like the following to allow combining - // - // mm2q.withEvictionIterator([&](auto iterator) { - // // user code - // }); - // - // at the time of writing it is unclear if the gains from combining are - // reasonable justification for the codemod required to achieve combinability - // as we don't expect this critical section to be the hotspot in user code. 
- // This is however subject to change at some time in the future as and when - // this assertion becomes false. LockHolder l(*lruMutex_); - return Iterator{std::move(l), lru_.rbegin()}; + return LockedIterator{std::move(l), lru_.rbegin()}; +} + +template T::*HookPtr> +template +void MM2Q::Container::withEvictionIterator(F&& fun) { + if (config_.useCombinedLockForIterators) { + lruMutex_->lock_combine([this, &fun]() { fun(Iterator{lru_.rbegin()}); }); + } else { + LockHolder lck{*lruMutex_}; + fun(Iterator{lru_.rbegin()}); + } } template T::*HookPtr> @@ -462,8 +454,9 @@ void MM2Q::Container::reconfigureLocked(const Time& currTime) { // Iterator Context Implementation template T::*HookPtr> -MM2Q::Container::Iterator::Iterator( - LockHolder l, const typename LruList::Iterator& iter) noexcept - : LruList::Iterator(iter), l_(std::move(l)) {} +MM2Q::Container::LockedIterator::LockedIterator( + LockHolder l, const Iterator& iter) noexcept + : Iterator(iter), l_(std::move(l)) {} + } // namespace cachelib } // namespace facebook diff --git a/cachelib/allocator/MM2Q.h b/cachelib/allocator/MM2Q.h index a3ffdb718e..982eca21f9 100644 --- a/cachelib/allocator/MM2Q.h +++ b/cachelib/allocator/MM2Q.h @@ -214,6 +214,50 @@ class MM2Q { size_t hotPercent, size_t coldPercent, uint32_t mmReconfigureInterval) + : Config(time, + ratio, + updateOnW, + updateOnR, + tryLockU, + rebalanceOnRecordAccs, + hotPercent, + coldPercent, + mmReconfigureInterval, + false) {} + + // @param time the LRU refresh time. + // An item will be promoted only once in each + // lru refresh time depite the + // number of accesses it gets. + // @param ratio the LRU refresh ratio. The ratio times the + // oldest element's lifetime in warm queue + // would be the minimum value of LRU refresh + // time. + // @param udpateOnW whether to promote the item on write + // @param updateOnR whether to promote the item on read + // @param tryLockU whether to use a try lock when doing + // update. 
+ // @param rebalanceOnRecordAccs whether to do rebalance on access. If set + // to false, rebalance only happens when + // items are added or removed to the queue. + // @param hotPercent percentage number for the size of the hot + // queue in the overall size. + // @param coldPercent percentage number for the size of the cold + // queue in the overall size. + // @param mmReconfigureInterval Time interval for recalculating lru + // refresh time according to the ratio. + // useCombinedLockForIterators Whether to use combined locking for + // withEvictionIterator + Config(uint32_t time, + double ratio, + bool updateOnW, + bool updateOnR, + bool tryLockU, + bool rebalanceOnRecordAccs, + size_t hotPercent, + size_t coldPercent, + uint32_t mmReconfigureInterval, + bool useCombinedLockForIterators) : defaultLruRefreshTime(time), lruRefreshRatio(ratio), updateOnWrite(updateOnW), @@ -223,7 +267,8 @@ class MM2Q { hotSizePercent(hotPercent), coldSizePercent(coldPercent), mmReconfigureIntervalSecs( - std::chrono::seconds(mmReconfigureInterval)) { + std::chrono::seconds(mmReconfigureInterval)), + useCombinedLockForIterators(useCombinedLockForIterators) { checkLruSizes(); } @@ -306,6 +351,9 @@ class MM2Q { // Minimum interval between reconfigurations. If 0, reconfigure is never // called. std::chrono::seconds mmReconfigureIntervalSecs{}; + + // Whether to use combined locking for withEvictionIterator. + bool useCombinedLockForIterators{false}; }; // The container object which can be used to keep track of objects of type @@ -347,22 +395,24 @@ class MM2Q { Container(const Container&) = delete; Container& operator=(const Container&) = delete; + using Iterator = typename LruList::Iterator; + // context for iterating the MM container. At any given point of time, // there can be only one iterator active since we need to lock the LRU for // iteration. we can support multiple iterators at same time, by using a // shared ptr in the context for the lock holder in the future. 
- class Iterator : public LruList::Iterator { + class LockedIterator : public Iterator { public: // noncopyable but movable. - Iterator(const Iterator&) = delete; - Iterator& operator=(const Iterator&) = delete; + LockedIterator(const LockedIterator&) = delete; + LockedIterator& operator=(const LockedIterator&) = delete; - Iterator(Iterator&&) noexcept = default; + LockedIterator(LockedIterator&&) noexcept = default; // 1. Invalidate this iterator // 2. Unlock void destroy() { - LruList::Iterator::reset(); + Iterator::reset(); if (l_.owns_lock()) { l_.unlock(); } @@ -373,15 +423,15 @@ class MM2Q { if (!l_.owns_lock()) { l_.lock(); } - LruList::Iterator::resetToBegin(); + Iterator::resetToBegin(); } private: // private because it's easy to misuse and cause deadlock for MM2Q - Iterator& operator=(Iterator&&) noexcept = default; + LockedIterator& operator=(LockedIterator&&) noexcept = default; // create an lru iterator with the lock being held. - Iterator(LockHolder l, const typename LruList::Iterator& iter) noexcept; + LockedIterator(LockHolder l, const Iterator& iter) noexcept; // only the container can create iterators friend Container; @@ -422,7 +472,7 @@ class MM2Q { // same as the above but uses an iterator context. The iterator is updated // on removal of the corresponding node to point to the next node. The - // iterator context holds the lock on the lru. + // iterator context is responsible for locking. // // iterator will be advanced to the next node after removing the node // @@ -445,7 +495,12 @@ class MM2Q { // Obtain an iterator that start from the tail and can be used // to search for evictions. This iterator holds a lock to this // container and only one such iterator can exist at a time - Iterator getEvictionIterator() const noexcept; + LockedIterator getEvictionIterator() const noexcept; + + // Execute provided function under container lock. Function gets + // Iterator passed as parameter. 
+ template + void withEvictionIterator(F&& f); // get the current config as a copy Config getConfig() const; diff --git a/cachelib/allocator/MMLru-inl.h b/cachelib/allocator/MMLru-inl.h index 75ba5037f6..d35759f212 100644 --- a/cachelib/allocator/MMLru-inl.h +++ b/cachelib/allocator/MMLru-inl.h @@ -212,10 +212,21 @@ bool MMLru::Container::add(T& node) noexcept { } template T::*HookPtr> -typename MMLru::Container::Iterator +typename MMLru::Container::LockedIterator MMLru::Container::getEvictionIterator() const noexcept { LockHolder l(*lruMutex_); - return Iterator{std::move(l), lru_.rbegin()}; + return LockedIterator{std::move(l), lru_.rbegin()}; +} + +template T::*HookPtr> +template +void MMLru::Container::withEvictionIterator(F&& fun) { + if (config_.useCombinedLockForIterators) { + lruMutex_->lock_combine([this, &fun]() { fun(Iterator{lru_.rbegin()}); }); + } else { + LockHolder lck{*lruMutex_}; + fun(Iterator{lru_.rbegin()}); + } } template T::*HookPtr> @@ -360,8 +371,9 @@ void MMLru::Container::reconfigureLocked(const Time& currTime) { // Iterator Context Implementation template T::*HookPtr> -MMLru::Container::Iterator::Iterator( - LockHolder l, const typename LruList::Iterator& iter) noexcept - : LruList::Iterator(iter), l_(std::move(l)) {} +MMLru::Container::LockedIterator::LockedIterator( + LockHolder l, const Iterator& iter) noexcept + : Iterator(iter), l_(std::move(l)) {} + } // namespace cachelib } // namespace facebook diff --git a/cachelib/allocator/MMLru.h b/cachelib/allocator/MMLru.h index c280242ae5..29c6d02689 100644 --- a/cachelib/allocator/MMLru.h +++ b/cachelib/allocator/MMLru.h @@ -145,6 +145,40 @@ class MMLru { bool tryLockU, uint8_t ipSpec, uint32_t mmReconfigureInterval) + : Config(time, + ratio, + updateOnW, + updateOnR, + tryLockU, + ipSpec, + mmReconfigureInterval, + false) {} + + // @param time the LRU refresh time in seconds. + // An item will be promoted only once in each lru refresh + // time depite the number of accesses it gets. 
+ // @param ratio the lru refresh ratio. The ratio times the + // oldest element's lifetime in warm queue + // would be the minimum value of LRU refresh time. + // @param udpateOnW whether to promote the item on write + // @param updateOnR whether to promote the item on read + // @param tryLockU whether to use a try lock when doing update. + // @param ipSpec insertion point spec, which is the inverse power of + // length from the end of the queue. For example, value 1 + // means the insertion point is 1/2 from the end of LRU; + // value 2 means 1/4 from the end of LRU. + // @param mmReconfigureInterval Time interval for recalculating lru + // refresh time according to the ratio. + // useCombinedLockForIterators Whether to use combined locking for + // withEvictionIterator + Config(uint32_t time, + double ratio, + bool updateOnW, + bool updateOnR, + bool tryLockU, + uint8_t ipSpec, + uint32_t mmReconfigureInterval, + bool useCombinedLockForIterators) : defaultLruRefreshTime(time), lruRefreshRatio(ratio), updateOnWrite(updateOnW), @@ -152,7 +186,8 @@ class MMLru { tryLockUpdate(tryLockU), lruInsertionPointSpec(ipSpec), mmReconfigureIntervalSecs( - std::chrono::seconds(mmReconfigureInterval)) {} + std::chrono::seconds(mmReconfigureInterval)), + useCombinedLockForIterators(useCombinedLockForIterators) {} Config() = default; Config(const Config& rhs) = default; @@ -198,6 +233,9 @@ class MMLru { // Minimum interval between reconfigurations. If 0, reconfigure is never // called. std::chrono::seconds mmReconfigureIntervalSecs{}; + + // Whether to use combined locking for withEvictionIterator. + bool useCombinedLockForIterators{false}; }; // The container object which can be used to keep track of objects of type @@ -234,22 +272,24 @@ class MMLru { Container(const Container&) = delete; Container& operator=(const Container&) = delete; + using Iterator = typename LruList::Iterator; + // context for iterating the MM container. 
At any given point of time, // there can be only one iterator active since we need to lock the LRU for // iteration. we can support multiple iterators at same time, by using a // shared ptr in the context for the lock holder in the future. - class Iterator : public LruList::Iterator { + class LockedIterator : public Iterator { public: // noncopyable but movable. - Iterator(const Iterator&) = delete; - Iterator& operator=(const Iterator&) = delete; + LockedIterator(const LockedIterator&) = delete; + LockedIterator& operator=(const LockedIterator&) = delete; - Iterator(Iterator&&) noexcept = default; + LockedIterator(LockedIterator&&) noexcept = default; // 1. Invalidate this iterator // 2. Unlock void destroy() { - LruList::Iterator::reset(); + Iterator::reset(); if (l_.owns_lock()) { l_.unlock(); } @@ -260,15 +300,15 @@ class MMLru { if (!l_.owns_lock()) { l_.lock(); } - LruList::Iterator::resetToBegin(); + Iterator::resetToBegin(); } private: // private because it's easy to misuse and cause deadlock for MMLru - Iterator& operator=(Iterator&&) noexcept = default; + LockedIterator& operator=(LockedIterator&&) noexcept = default; // create an lru iterator with the lock being held. - Iterator(LockHolder l, const typename LruList::Iterator& iter) noexcept; + LockedIterator(LockHolder l, const Iterator& iter) noexcept; // only the container can create iterators friend Container; @@ -307,10 +347,9 @@ class MMLru { // state of node is unchanged. bool remove(T& node) noexcept; - using Iterator = Iterator; // same as the above but uses an iterator context. The iterator is updated // on removal of the corresponding node to point to the next node. The - // iterator context holds the lock on the lru. + // iterator context is responsible for locking. // // iterator will be advanced to the next node after removing the node // @@ -330,7 +369,12 @@ class MMLru { // Obtain an iterator that start from the tail and can be used // to search for evictions. 
This iterator holds a lock to this // container and only one such iterator can exist at a time - Iterator getEvictionIterator() const noexcept; + LockedIterator getEvictionIterator() const noexcept; + + // Execute provided function under container lock. Function gets + // iterator passed as parameter. + template + void withEvictionIterator(F&& f); // get copy of current config Config getConfig() const; diff --git a/cachelib/allocator/MMTinyLFU-inl.h b/cachelib/allocator/MMTinyLFU-inl.h index 59c72c1720..46640b24ca 100644 --- a/cachelib/allocator/MMTinyLFU-inl.h +++ b/cachelib/allocator/MMTinyLFU-inl.h @@ -214,10 +214,17 @@ bool MMTinyLFU::Container::add(T& node) noexcept { } template T::*HookPtr> -typename MMTinyLFU::Container::Iterator +typename MMTinyLFU::Container::LockedIterator MMTinyLFU::Container::getEvictionIterator() const noexcept { LockHolder l(lruMutex_); - return Iterator{std::move(l), *this}; + return LockedIterator{std::move(l), *this}; +} + +template T::*HookPtr> +template +void MMTinyLFU::Container::withEvictionIterator(F&& fun) { + // TinyLFU uses spin lock which does not support combined locking + fun(getEvictionIterator()); } template T::*HookPtr> @@ -245,7 +252,7 @@ bool MMTinyLFU::Container::remove(T& node) noexcept { } template T::*HookPtr> -void MMTinyLFU::Container::remove(Iterator& it) noexcept { +void MMTinyLFU::Container::remove(LockedIterator& it) noexcept { T& node = *it; XDCHECK(node.isInMMContainer()); ++it; @@ -347,9 +354,9 @@ void MMTinyLFU::Container::reconfigureLocked(const Time& currTime) { lruRefreshTime_.store(lruRefreshTime, std::memory_order_relaxed); } -// Iterator Context Implementation +// Locked Iterator Context Implementation template T::*HookPtr> -MMTinyLFU::Container::Iterator::Iterator( +MMTinyLFU::Container::LockedIterator::LockedIterator( LockHolder l, const Container& c) noexcept : c_(c), tIter_(c.lru_.getList(LruType::Tiny).rbegin()), diff --git a/cachelib/allocator/MMTinyLFU.h b/cachelib/allocator/MMTinyLFU.h 
index 690b5be8b3..a9764a10f3 100644 --- a/cachelib/allocator/MMTinyLFU.h +++ b/cachelib/allocator/MMTinyLFU.h @@ -336,7 +336,7 @@ class MMTinyLFU { // state of node is unchanged. bool remove(T& node) noexcept; - class Iterator; + class LockedIterator; // same as the above but uses an iterator context. The iterator is updated // on removal of the corresponding node to point to the next node. The // iterator context holds the lock on the lru. @@ -344,7 +344,7 @@ class MMTinyLFU { // iterator will be advanced to the next node after removing the node // // @param it Iterator that will be removed - void remove(Iterator& it) noexcept; + void remove(LockedIterator& it) noexcept; // replaces one node with another, at the same position // @@ -360,20 +360,20 @@ class MMTinyLFU { // there can be only one iterator active since we need to lock the LRU for // iteration. we can support multiple iterators at same time, by using a // shared ptr in the context for the lock holder in the future. - class Iterator { + class LockedIterator { public: using ListIterator = typename LruList::DListIterator; // noncopyable but movable. 
- Iterator(const Iterator&) = delete; - Iterator& operator=(const Iterator&) = delete; - Iterator(Iterator&&) noexcept = default; + LockedIterator(const LockedIterator&) = delete; + LockedIterator& operator=(const LockedIterator&) = delete; + LockedIterator(LockedIterator&&) noexcept = default; - Iterator& operator++() noexcept { + LockedIterator& operator++() noexcept { ++getIter(); return *this; } - Iterator& operator--() { + LockedIterator& operator--() { throw std::invalid_argument( "Decrementing eviction iterator is not supported"); } @@ -381,12 +381,12 @@ class MMTinyLFU { T* operator->() const noexcept { return getIter().operator->(); } T& operator*() const noexcept { return getIter().operator*(); } - bool operator==(const Iterator& other) const noexcept { + bool operator==(const LockedIterator& other) const noexcept { return &c_ == &other.c_ && tIter_ == other.tIter_ && mIter_ == other.mIter_; } - bool operator!=(const Iterator& other) const noexcept { + bool operator!=(const LockedIterator& other) const noexcept { return !(*this == other); } @@ -421,10 +421,10 @@ class MMTinyLFU { private: // private because it's easy to misuse and cause deadlock for MMTinyLFU - Iterator& operator=(Iterator&&) noexcept = default; + LockedIterator& operator=(LockedIterator&&) noexcept = default; // create an lru iterator with the lock being held. - explicit Iterator(LockHolder l, const Container& c) noexcept; + explicit LockedIterator(LockHolder l, const Container& c) noexcept; const ListIterator& getIter() const noexcept { return evictTiny() ? tIter_ : mIter_; @@ -432,7 +432,7 @@ class MMTinyLFU { ListIterator& getIter() noexcept { return const_cast( - static_cast(this)->getIter()); + static_cast(this)->getIter()); } bool evictTiny() const noexcept { @@ -489,7 +489,12 @@ class MMTinyLFU { // Obtain an iterator that start from the tail and can be used // to search for evictions. 
This iterator holds a lock to this // container and only one such iterator can exist at a time - Iterator getEvictionIterator() const noexcept; + LockedIterator getEvictionIterator() const noexcept; + + // Execute provided function under container lock. Function gets + // iterator passed as parameter. + template + void withEvictionIterator(F&& f); // for saving the state of the lru // diff --git a/cachelib/allocator/tests/MM2QTest.cpp b/cachelib/allocator/tests/MM2QTest.cpp index 9f76b74054..14b3fde1e4 100644 --- a/cachelib/allocator/tests/MM2QTest.cpp +++ b/cachelib/allocator/tests/MM2QTest.cpp @@ -43,6 +43,7 @@ TEST_F(MM2QTest, RemoveWithSmallQueues) { // trying to remove through iterator should work as expected. // no need of iter++ since remove will do that. + verifyIterationVariants(c); for (auto iter = c.getEvictionIterator(); iter;) { auto& node = *iter; ASSERT_TRUE(node.isInMMContainer()); @@ -54,6 +55,7 @@ TEST_F(MM2QTest, RemoveWithSmallQueues) { ASSERT_NE((*iter).getId(), node.getId()); } } + verifyIterationVariants(c); ASSERT_EQ(c.getStats().size, 0); for (const auto& node : nodes) { @@ -75,9 +77,9 @@ TEST_F(MM2QTest, RecordAccessWrites) { // ensure that nodes are updated in lru with access mode write (read) only // when updateOnWrite (updateOnRead) is enabled. - auto testWithAccessMode = [](Container& c_, const Nodes& nodes_, - AccessMode mode, bool updateOnWrites, - bool updateOnReads) { + auto testWithAccessMode = [this](Container& c_, const Nodes& nodes_, + AccessMode mode, bool updateOnWrites, + bool updateOnReads) { // accessing must at least update the update time. to do so, first set the // updateTime of the node to be in the past. 
const uint32_t timeInPastStart = 100; @@ -95,6 +97,7 @@ TEST_F(MM2QTest, RecordAccessWrites) { for (auto itr = c_.getEvictionIterator(); itr; ++itr) { nodeOrderPrev.push_back(itr->getId()); } + verifyIterationVariants(c_); int nAccess = 1000; std::set accessedNodes; @@ -122,6 +125,7 @@ TEST_F(MM2QTest, RecordAccessWrites) { for (auto itr = c_.getEvictionIterator(); itr; ++itr) { nodeOrderCurr.push_back(itr->getId()); } + verifyIterationVariants(c_); if ((mode == AccessMode::kWrite && updateOnWrites) || (mode == AccessMode::kRead && updateOnReads)) { @@ -209,6 +213,7 @@ TEST_F(MM2QTest, RecordAccessWrites) { template void MMTypeTest::testIterate(std::vector>& nodes, Container& c) { + verifyIterationVariants(c); auto it2q = c.getEvictionIterator(); auto it = nodes.begin(); while (it2q) { @@ -223,6 +228,7 @@ void MMTypeTest::testMatch(std::string expected, MMTypeTest::Container& c) { int index = -1; std::string actual; + verifyIterationVariants(c); auto it2q = c.getEvictionIterator(); while (it2q) { ++index; @@ -694,5 +700,40 @@ TEST_F(MM2QTest, TailTrackingEnabledCheck) { EXPECT_THROW(c.setConfig(newConfig), std::invalid_argument); } } + +TEST_F(MM2QTest, CombinedLockingIteration) { + MM2QTest::Config config{}; + config.useCombinedLockForIterators = true; + config.lruRefreshTime = 0; + Container c(config, {}); + std::vector> nodes; + createSimpleContainer(c, nodes); + + // access to move items from cold to warm + for (auto& node : nodes) { + ASSERT_TRUE(c.recordAccess(*node, AccessMode::kRead)); + } + + // trying to remove through iterator should work as expected. + // no need of iter++ since remove will do that. + verifyIterationVariants(c); + for (auto iter = c.getEvictionIterator(); iter;) { + auto& node = *iter; + ASSERT_TRUE(node.isInMMContainer()); + + // this will move the iter. 
+ c.remove(iter); + ASSERT_FALSE(node.isInMMContainer()); + if (iter) { + ASSERT_NE((*iter).getId(), node.getId()); + } + } + verifyIterationVariants(c); + + ASSERT_EQ(c.getStats().size, 0); + for (const auto& node : nodes) { + ASSERT_FALSE(node->isInMMContainer()); + } +} } // namespace cachelib } // namespace facebook diff --git a/cachelib/allocator/tests/MMLruTest.cpp b/cachelib/allocator/tests/MMLruTest.cpp index 3db40bb72e..5250031e04 100644 --- a/cachelib/allocator/tests/MMLruTest.cpp +++ b/cachelib/allocator/tests/MMLruTest.cpp @@ -42,9 +42,9 @@ TEST_F(MMLruTest, RecordAccessWrites) { // ensure that nodes are updated in lru with access mode write (read) only // when updateOnWrite (updateOnRead) is enabled. - auto testWithAccessMode = [](Container& c_, const Nodes& nodes_, - AccessMode mode, bool updateOnWrites, - bool updateOnReads) { + auto testWithAccessMode = [this](Container& c_, const Nodes& nodes_, + AccessMode mode, bool updateOnWrites, + bool updateOnReads) { // accessing must at least update the update time. to do so, first set the // updateTime of the node to be in the past. 
const uint32_t timeInPastStart = 100; @@ -62,6 +62,7 @@ TEST_F(MMLruTest, RecordAccessWrites) { for (auto itr = c_.getEvictionIterator(); itr; ++itr) { nodeOrderPrev.push_back(itr->getId()); } + verifyIterationVariants(c_); int nAccess = 1000; std::set accessedNodes; @@ -89,6 +90,7 @@ TEST_F(MMLruTest, RecordAccessWrites) { for (auto itr = c_.getEvictionIterator(); itr; ++itr) { nodeOrderCurr.push_back(itr->getId()); } + verifyIterationVariants(c_); if ((mode == AccessMode::kWrite && updateOnWrites) || (mode == AccessMode::kRead && updateOnReads)) { @@ -180,6 +182,7 @@ TEST_F(MMLruTest, InsertionPointBasic) { } auto checkLruConfig = [&](Container& container, std::vector order) { + verifyIterationVariants(container); auto it = container.getEvictionIterator(); int i = 0; while (it) { @@ -379,6 +382,7 @@ TEST_F(MMLruTest, InsertionPointStress) { auto getTailCount = [&]() { size_t ntail = 0; + verifyIterationVariants(c); auto it = c.getEvictionIterator(); while (it) { if (it->isTail()) { @@ -541,5 +545,40 @@ TEST_F(MMLruTest, Reconfigure) { // node 0 (age 2) does not get promoted EXPECT_FALSE(container.recordAccess(*nodes[0], AccessMode::kRead)); } + +TEST_F(MMLruTest, CombinedLockingIteration) { + MMLruTest::Config config{}; + config.useCombinedLockForIterators = true; + config.lruRefreshTime = 0; + Container c(config, {}); + std::vector> nodes; + createSimpleContainer(c, nodes); + + // access to move items from cold to warm + for (auto& node : nodes) { + ASSERT_TRUE(c.recordAccess(*node, AccessMode::kRead)); + } + + // trying to remove through iterator should work as expected. + // no need of iter++ since remove will do that. + verifyIterationVariants(c); + for (auto iter = c.getEvictionIterator(); iter;) { + auto& node = *iter; + ASSERT_TRUE(node.isInMMContainer()); + + // this will move the iter. 
+ c.remove(iter); + ASSERT_FALSE(node.isInMMContainer()); + if (iter) { + ASSERT_NE((*iter).getId(), node.getId()); + } + } + verifyIterationVariants(c); + + ASSERT_EQ(c.getStats().size, 0); + for (const auto& node : nodes) { + ASSERT_FALSE(node->isInMMContainer()); + } +} } // namespace cachelib } // namespace facebook diff --git a/cachelib/allocator/tests/MMTinyLFUTest.cpp b/cachelib/allocator/tests/MMTinyLFUTest.cpp index a57fd859fb..adcea95ebe 100644 --- a/cachelib/allocator/tests/MMTinyLFUTest.cpp +++ b/cachelib/allocator/tests/MMTinyLFUTest.cpp @@ -42,9 +42,9 @@ TEST_F(MMTinyLFUTest, RecordAccessWrites) { // ensure that nodes are updated in lru with access mode write (read) only // when updateOnWrite (updateOnRead) is enabled. - auto testWithAccessMode = [](Container& c_, const Nodes& nodes_, - AccessMode mode, bool updateOnWrites, - bool updateOnReads) { + auto testWithAccessMode = [this](Container& c_, const Nodes& nodes_, + AccessMode mode, bool updateOnWrites, + bool updateOnReads) { // accessing must at least update the update time. to do so, first set the // updateTime of the node to be in the past. 
const uint32_t timeInPastStart = 100; @@ -62,6 +62,7 @@ TEST_F(MMTinyLFUTest, RecordAccessWrites) { for (auto itr = c_.getEvictionIterator(); itr; ++itr) { nodeOrderPrev.push_back(itr->getId()); } + verifyIterationVariants(c_); int nAccess = 1000; std::set accessedNodes; @@ -89,6 +90,7 @@ TEST_F(MMTinyLFUTest, RecordAccessWrites) { for (auto itr = c_.getEvictionIterator(); itr; ++itr) { nodeOrderCurr.push_back(itr->getId()); } + verifyIterationVariants(c_); if ((mode == AccessMode::kWrite && updateOnWrites) || (mode == AccessMode::kRead && updateOnReads)) { @@ -170,6 +172,7 @@ TEST_F(MMTinyLFUTest, TinyLFUBasic) { auto checkTlfuConfig = [&](Container& container, std::string expected, std::string context) { + verifyIterationVariants(container); auto it = container.getEvictionIterator(); std::string actual; while (it) { diff --git a/cachelib/allocator/tests/MMTypeTest.h b/cachelib/allocator/tests/MMTypeTest.h index ba12266068..d38f6ce2c1 100644 --- a/cachelib/allocator/tests/MMTypeTest.h +++ b/cachelib/allocator/tests/MMTypeTest.h @@ -149,6 +149,7 @@ class MMTypeTest : public testing::Test { void testIterate(std::vector>& nodes, Container& c); void testMatch(std::string expected, Container& c); size_t getListSize(const Container& c, typename MMType::LruType list); + void verifyIterationVariants(Container& c); }; template @@ -187,6 +188,7 @@ void MMTypeTest::testAddBasic( } EXPECT_EQ(foundNodes.size(), c.getStats().size); EXPECT_EQ(foundNodes.size(), c.size()); + verifyIterationVariants(c); } template @@ -232,6 +234,7 @@ void MMTypeTest::testRemoveBasic(Config config) { for (auto itr = c.getEvictionIterator(); itr; ++itr) { foundNodes.insert(itr->getId()); } + verifyIterationVariants(c); for (const auto& node : removedNodes) { ASSERT_EQ(foundNodes.find(node->getId()), foundNodes.end()); @@ -249,6 +252,7 @@ void MMTypeTest::testRemoveBasic(Config config) { ASSERT_NE((*iter).getId(), node.getId()); } } + verifyIterationVariants(c); EXPECT_EQ(c.getStats().size, 0); 
EXPECT_EQ(c.size(), 0); @@ -328,6 +332,7 @@ void MMTypeTest::testSerializationBasic(Config config) { break; } } + verifyIterationVariants(c2); ASSERT_TRUE(foundNode); foundNode = false; } @@ -343,5 +348,28 @@ size_t MMTypeTest::getListSize(const Container& c, } throw std::invalid_argument("LruType not existing"); } + +// Verifies that using getEvictionIterator and withEvictionIterator +// yields the same elements, in the same order. +template +void MMTypeTest::verifyIterationVariants(Container& c) { + std::vector nodes; + for (auto iter = c.getEvictionIterator(); iter; ++iter) { + nodes.push_back(&(*iter)); + } + + size_t i = 0; + c.withEvictionIterator([&nodes, &i](auto&& iter) { + while (iter) { + auto& node = *iter; + ASSERT_EQ(&node, nodes[i]); + + ++iter; + ++i; + } + + ASSERT_EQ(i, nodes.size()); + }); +} } // namespace cachelib } // namespace facebook diff --git a/cachelib/benchmarks/MMTypeBench.h b/cachelib/benchmarks/MMTypeBench.h index 52700d650c..6382da642b 100644 --- a/cachelib/benchmarks/MMTypeBench.h +++ b/cachelib/benchmarks/MMTypeBench.h @@ -203,10 +203,11 @@ void MMTypeBench::benchRemoveIterator(unsigned int numNodes) { // // no need of iter++ since remove will do that. 
for (unsigned int deleted = 0; deleted < numNodes; deleted++) { - auto iter = c->getEvictionIterator(); - if (iter) { - c->remove(iter); - } + c->withEvictionIterator([this](auto&& iter) { + if (iter) { + c->remove(iter); + } + }); } } diff --git a/cachelib/cachebench/cache/Cache.h b/cachelib/cachebench/cache/Cache.h index c107b269e5..6a8f0b3342 100644 --- a/cachelib/cachebench/cache/Cache.h +++ b/cachelib/cachebench/cache/Cache.h @@ -466,7 +466,9 @@ inline typename LruAllocator::MMConfig makeMMConfig(CacheConfig const& config) { config.lruUpdateOnWrite, config.lruUpdateOnRead, config.tryLockUpdate, - static_cast(config.lruIpSpec)); + static_cast(config.lruIpSpec), + 0, + config.useCombinedLockForIterators); } // LRU @@ -480,7 +482,9 @@ inline typename Lru2QAllocator::MMConfig makeMMConfig( config.tryLockUpdate, false, config.lru2qHotPct, - config.lru2qColdPct); + config.lru2qColdPct, + 0, + config.useCombinedLockForIterators); } } // namespace cachebench diff --git a/cachelib/cachebench/util/CacheConfig.cpp b/cachelib/cachebench/util/CacheConfig.cpp index 50297955d2..1d464b4847 100644 --- a/cachelib/cachebench/util/CacheConfig.cpp +++ b/cachelib/cachebench/util/CacheConfig.cpp @@ -43,6 +43,7 @@ CacheConfig::CacheConfig(const folly::dynamic& configJson) { JSONSetVal(configJson, lruUpdateOnRead); JSONSetVal(configJson, tryLockUpdate); JSONSetVal(configJson, lruIpSpec); + JSONSetVal(configJson, useCombinedLockForIterators); JSONSetVal(configJson, lru2qHotPct); JSONSetVal(configJson, lru2qColdPct); diff --git a/cachelib/cachebench/util/CacheConfig.h b/cachelib/cachebench/util/CacheConfig.h index ea863407cf..1e1e0b1448 100644 --- a/cachelib/cachebench/util/CacheConfig.h +++ b/cachelib/cachebench/util/CacheConfig.h @@ -72,6 +72,7 @@ struct CacheConfig : public JSONConfig { bool lruUpdateOnWrite{false}; bool lruUpdateOnRead{true}; bool tryLockUpdate{false}; + bool useCombinedLockForIterators{false}; // LRU param uint64_t lruIpSpec{0}; diff --git 
a/website/docs/Cache_Library_Architecture_Guide/RAM_cache_indexing_and_eviction.md b/website/docs/Cache_Library_Architecture_Guide/RAM_cache_indexing_and_eviction.md index e23217e36c..84d41ff33b 100644 --- a/website/docs/Cache_Library_Architecture_Guide/RAM_cache_indexing_and_eviction.md +++ b/website/docs/Cache_Library_Architecture_Guide/RAM_cache_indexing_and_eviction.md @@ -114,6 +114,10 @@ It has the following major API functions: that can be evicted (no active handles, not moving, etc) is used (see `CacheAllocator::findEviction(PoolId pid, ClassId cid)` in `cachelib/allocator/CacheAllocator-inl.h`). +* `withEvictionIterator`: Executes a callback with the eviction iterator passed as a + parameter. This is an alternative to `getEvictionIterator` that offers the possibility + of using combined locking. Combined locking can be turned on by setting the + `useCombinedLockForIterators` config option. The full API can be found in `struct Container` in `cachelib/allocator/MMLru.h`. This links to MMLru, which is one of the