From f650fbaa1b21b19345fdceef389752feb2f9296e Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 15 Apr 2024 12:49:38 -0700 Subject: [PATCH 01/20] Adds stats to TieredSpilloverCache Signed-off-by: Peter Alfonsi --- .../common/tier/TieredSpilloverCache.java | 180 +++++++++++++++--- 1 file changed, 153 insertions(+), 27 deletions(-) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index ae3d9f1dbcf62..282c593f17544 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -18,8 +18,10 @@ import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.RemovalReason; import org.opensearch.common.cache.policy.CachedQueryResult; +import org.opensearch.common.cache.stats.CacheStatsHolder; import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -37,6 +39,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Function; import java.util.function.Predicate; +import java.util.function.ToLongBiFunction; /** * This cache spillover the evicted items from heap tier to disk tier. All the new items are first cached on heap @@ -57,9 +60,16 @@ public class TieredSpilloverCache implements ICache { private final ICache diskCache; private final ICache onHeapCache; + private final RemovalListener, V> onDiskRemovalListener; + private final RemovalListener, V> onHeapRemovalListener; + // The listener for removals from the spillover cache as a whole - // TODO: In TSC stats PR, each tier will have its own separate removal listener. private final RemovalListener, V> removalListener; + + // In future we want to just read the stats from the individual tiers' statsHolder objects, but this isn't + // possible right now because of the way computeIfAbsent is implemented. + private final CacheStatsHolder statsHolder; + private ToLongBiFunction, V> weigher; private final List dimensionNames; ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); ReleasableLock readLock = new ReleasableLock(readWriteLock.readLock()); @@ -68,27 +78,25 @@ public class TieredSpilloverCache implements ICache { * Maintains caching tiers in ascending order of cache latency. 
*/ private final List> cacheList; + private final List, String>> cacheAndTierValueList; private final List> policies; + // Common values used for tier dimension + public static final String TIER_DIMENSION_NAME = "tier"; + public static final String TIER_DIMENSION_VALUE_ON_HEAP = "on_heap"; + public static final String TIER_DIMENSION_VALUE_DISK = "disk"; + TieredSpilloverCache(Builder builder) { Objects.requireNonNull(builder.onHeapCacheFactory, "onHeap cache builder can't be null"); Objects.requireNonNull(builder.diskCacheFactory, "disk cache builder can't be null"); this.removalListener = Objects.requireNonNull(builder.removalListener, "Removal listener can't be null"); + this.onHeapRemovalListener = new HeapTierRemovalListener(this); + this.onDiskRemovalListener = new DiskTierRemovalListener(this); + this.weigher = Objects.requireNonNull(builder.cacheConfig.getWeigher(), "Weigher can't be null"); + this.onHeapCache = builder.onHeapCacheFactory.create( - new CacheConfig.Builder().setRemovalListener(new RemovalListener, V>() { - @Override - public void onRemoval(RemovalNotification, V> notification) { - try (ReleasableLock ignore = writeLock.acquire()) { - if (SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()) - && evaluatePolicies(notification.getValue())) { - diskCache.put(notification.getKey(), notification.getValue()); - } else { - removalListener.onRemoval(notification); - } - } - } - }) + new CacheConfig.Builder().setRemovalListener(onHeapRemovalListener) .setKeyType(builder.cacheConfig.getKeyType()) .setValueType(builder.cacheConfig.getValueType()) .setSettings(builder.cacheConfig.getSettings()) @@ -102,9 +110,25 @@ && evaluatePolicies(notification.getValue())) { builder.cacheFactories ); - this.diskCache = builder.diskCacheFactory.create(builder.cacheConfig, builder.cacheType, builder.cacheFactories); + this.diskCache = builder.diskCacheFactory.create( + new CacheConfig.Builder().setRemovalListener(onDiskRemovalListener) + .setKeyType(builder.cacheConfig.getKeyType()) + .setValueType(builder.cacheConfig.getValueType()) + .setSettings(builder.cacheConfig.getSettings()) + .setWeigher(builder.cacheConfig.getWeigher()) + .setDimensionNames(builder.cacheConfig.getDimensionNames()) + .build(), + builder.cacheType, + builder.cacheFactories + ); this.cacheList = Arrays.asList(onHeapCache, diskCache); this.dimensionNames = builder.cacheConfig.getDimensionNames(); + this.cacheAndTierValueList = List.of( + new Tuple<>(onHeapCache, TIER_DIMENSION_VALUE_ON_HEAP), + new Tuple<>(diskCache, TIER_DIMENSION_VALUE_DISK) + ); + // Pass "tier" as the innermost dimension name, in addition to whatever dimensions are specified for the cache as a whole + this.statsHolder = new CacheStatsHolder(addTierValueToDimensionValues(dimensionNames, TIER_DIMENSION_NAME)); this.policies = builder.policies; // Will never be null; builder initializes it to an empty list } @@ -127,6 +151,7 @@ public V get(ICacheKey key) { public void put(ICacheKey key, V value) { try (ReleasableLock ignore = writeLock.acquire()) { onHeapCache.put(key, value); + updateStatsOnPut(TIER_DIMENSION_VALUE_ON_HEAP, key, value); } } @@ -141,6 +166,10 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V> V value = null; try (ReleasableLock ignore = writeLock.acquire()) { value = onHeapCache.computeIfAbsent(key, loader); + if (loader.isLoaded()) { + // The value was just computed and added to the cache + updateStatsOnPut(TIER_DIMENSION_VALUE_ON_HEAP, key, value); + } } return value; } @@ -152,9 +181,14 @@ public void 
invalidate(ICacheKey key) { // We are trying to invalidate the key from all caches though it would be present in only of them. // Doing this as we don't know where it is located. We could do a get from both and check that, but what will // also trigger a hit/miss listener event, so ignoring it for now. + // We don't update stats here, as this is handled by the removal listeners for the tiers. try (ReleasableLock ignore = writeLock.acquire()) { - for (ICache cache : cacheList) { - cache.invalidate(key); + for (Tuple, String> pair : cacheAndTierValueList) { + if (key.getDropStatsForDimensions()) { + List dimensionValues = addTierValueToDimensionValues(key.dimensions, pair.v2()); + statsHolder.removeDimensions(dimensionValues); + } + pair.v1().invalidate(key); } } } @@ -166,6 +200,7 @@ public void invalidateAll() { cache.invalidateAll(); } } + statsHolder.reset(); } /** @@ -181,11 +216,7 @@ public Iterable> keys() { @Override public long count() { - long count = 0; - for (ICache cache : cacheList) { - count += cache.count(); - } - return count; + return statsHolder.count(); } @Override @@ -206,19 +237,21 @@ public void close() throws IOException { @Override public ImmutableCacheStatsHolder stats() { - return null; // TODO: in TSC stats PR + return statsHolder.getImmutableCacheStatsHolder(); } private Function, V> getValueFromTieredCache() { return key -> { try (ReleasableLock ignore = readLock.acquire()) { - for (ICache cache : cacheList) { - V value = cache.get(key); + for (Tuple, String> pair : cacheAndTierValueList) { + V value = pair.v1().get(key); + // Get the tier value corresponding to this cache + List dimensionValues = addTierValueToDimensionValues(key.dimensions, pair.v2()); if (value != null) { - // update hit stats + statsHolder.incrementHits(dimensionValues); return value; } else { - // update miss stats + statsHolder.incrementMisses(dimensionValues); } } } @@ -226,6 +259,67 @@ private Function, V> getValueFromTieredCache() { }; } + void handleRemovalFromHeapTier(RemovalNotification, V> notification) { + ICacheKey key = notification.getKey(); + + boolean wasEvicted = false; + if (RemovalReason.EVICTED.equals(notification.getRemovalReason()) + || RemovalReason.CAPACITY.equals(notification.getRemovalReason())) { + try (ReleasableLock ignore = writeLock.acquire()) { + if (evaluatePolicies(notification.getValue())) { + diskCache.put(key, notification.getValue()); // spill over to the disk tier and increment its stats + updateStatsOnPut(TIER_DIMENSION_VALUE_DISK, key, notification.getValue()); + } else { + removalListener.onRemoval(notification); // The value is leaving the TSC entirely if it doesn't enter disk cache + } + } + wasEvicted = true; + } + + else { + // If the removal was for another reason, send this notification to the TSC's removal listener, as the value is leaving the TSC + // entirely + removalListener.onRemoval(notification); + } + updateStatsOnRemoval(TIER_DIMENSION_VALUE_ON_HEAP, wasEvicted, key, notification.getValue()); + } + + void handleRemovalFromDiskTier(RemovalNotification, V> notification) { + // Values removed from the disk tier leave the TSC entirely + removalListener.onRemoval(notification); + + boolean wasEvicted = false; + if (RemovalReason.EVICTED.equals(notification.getRemovalReason()) + || RemovalReason.CAPACITY.equals(notification.getRemovalReason())) { + wasEvicted = true; + } + updateStatsOnRemoval(TIER_DIMENSION_VALUE_DISK, wasEvicted, notification.getKey(), notification.getValue()); + } + + void updateStatsOnRemoval(String removedFromTierValue, 
boolean wasEvicted, ICacheKey key, V value) { + List dimensionValues = addTierValueToDimensionValues(key.dimensions, removedFromTierValue); + if (wasEvicted) { + statsHolder.incrementEvictions(dimensionValues); + } + statsHolder.decrementEntries(dimensionValues); + statsHolder.decrementSizeInBytes(dimensionValues, weigher.applyAsLong(key, value)); + } + + void updateStatsOnPut(String destinationTierValue, ICacheKey key, V value) { + List dimensionValues = addTierValueToDimensionValues(key.dimensions, destinationTierValue); + statsHolder.incrementEntries(dimensionValues); + statsHolder.incrementSizeInBytes(dimensionValues, weigher.applyAsLong(key, value)); + } + + /** + * Add tierValue to the end of a copy of the initial dimension values. + */ + private List addTierValueToDimensionValues(List initialDimensions, String tierValue) { + List result = new ArrayList<>(initialDimensions); + result.add(tierValue); + return result; + } + boolean evaluatePolicies(V value) { for (Predicate policy : policies) { if (!policy.test(value)) { @@ -235,6 +329,38 @@ boolean evaluatePolicies(V value) { return true; } + /** + * A class which receives removal events from the heap tier. + */ + private class HeapTierRemovalListener implements RemovalListener, V> { + private final TieredSpilloverCache tsc; + + HeapTierRemovalListener(TieredSpilloverCache tsc) { + this.tsc = tsc; + } + + @Override + public void onRemoval(RemovalNotification, V> notification) { + tsc.handleRemovalFromHeapTier(notification); + } + } + + /** + * A class which receives removal events from the disk tier. + */ + private class DiskTierRemovalListener implements RemovalListener, V> { + private final TieredSpilloverCache tsc; + + DiskTierRemovalListener(TieredSpilloverCache tsc) { + this.tsc = tsc; + } + + @Override + public void onRemoval(RemovalNotification, V> notification) { + tsc.handleRemovalFromDiskTier(notification); + } + } + /** * ConcatenatedIterables which combines cache iterables and supports remove() functionality as well if underlying * iterator supports it. 
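
The patch above tracks per-tier statistics by appending a tier value ("on_heap" or "disk") as the innermost dimension of each key's dimension-value list before updating the shared stats holder. The toy class below is an illustrative sketch of that accounting pattern only, not the real org.opensearch.common.cache.stats.CacheStatsHolder API; the class name ToyTierStats and its fields are invented for illustration, while withTierValue mirrors the patch's addTierValueToDimensionValues helper (renamed getDimensionsWithTierValue in a later patch of this series).

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.LongAdder;

    // Illustrative only: a minimal per-tier hit/miss accumulator mirroring the pattern above.
    class ToyTierStats {
        // One counter per full dimension-value list (the key's dimensions + the tier value appended last).
        private final Map<List<String>, LongAdder> hits = new ConcurrentHashMap<>();
        private final Map<List<String>, LongAdder> misses = new ConcurrentHashMap<>();

        void incrementHits(List<String> dimensionValues) {
            hits.computeIfAbsent(dimensionValues, k -> new LongAdder()).increment();
        }

        void incrementMisses(List<String> dimensionValues) {
            misses.computeIfAbsent(dimensionValues, k -> new LongAdder()).increment();
        }

        long getHits(List<String> dimensionValues) {
            LongAdder adder = hits.get(dimensionValues);
            return adder == null ? 0 : adder.sum();
        }

        long getMisses(List<String> dimensionValues) {
            LongAdder adder = misses.get(dimensionValues);
            return adder == null ? 0 : adder.sum();
        }

        // Mirrors the patch's addTierValueToDimensionValues: copy the key's dimensions and
        // append the tier value ("on_heap" or "disk") as the innermost dimension.
        static List<String> withTierValue(List<String> keyDimensions, String tierValue) {
            List<String> result = new ArrayList<>(keyDimensions);
            result.add(tierValue);
            return result;
        }
    }

Because getValueFromTieredCache() checks the heap tier first and falls through to the disk tier, a lookup that misses both tiers increments the miss count under both the "on_heap" and "disk" dimension lists, which is why the no-eviction test in the next patch asserts equal miss counts for both tiers.
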
From 324564cb58a8e07b2fcee6bc7e291d5ff075a57a Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 15 Apr 2024 12:50:12 -0700 Subject: [PATCH 02/20] Updates TSC tests to use stats Signed-off-by: Peter Alfonsi --- .../cache/common/tier/MockDiskCache.java | 1 + .../tier/TieredSpilloverCacheTests.java | 117 ++++++++++++++++-- 2 files changed, 108 insertions(+), 10 deletions(-) diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java index 0d98503af635f..522eabd39c3c9 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java @@ -73,6 +73,7 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V> @Override public void invalidate(ICacheKey key) { + removalListener.onRemoval(new RemovalNotification<>(key, cache.get(key), RemovalReason.INVALIDATED)); this.cache.remove(key); } diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index bf9f8fd22d793..0df2a6a840e1e 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -16,6 +16,8 @@ import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.stats.ImmutableCacheStats; +import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; import org.opensearch.common.cache.store.OpenSearchOnHeapCache; import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings; @@ -28,6 +30,7 @@ import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; +import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -42,11 +45,12 @@ import java.util.function.Function; import java.util.function.Predicate; +import static org.opensearch.cache.common.tier.TieredSpilloverCache.TIER_DIMENSION_VALUE_DISK; +import static org.opensearch.cache.common.tier.TieredSpilloverCache.TIER_DIMENSION_VALUE_ON_HEAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY; public class TieredSpilloverCacheTests extends OpenSearchTestCase { - // TODO: TSC stats impl is in a future PR. Parts of tests which use stats values are missing for now. 
static final List dimensionNames = List.of("dim1", "dim2", "dim3"); private ClusterSettings clusterSettings; @@ -87,6 +91,9 @@ public void testComputeIfAbsentWithoutAnyOnHeapCacheEviction() throws Exception tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); } assertEquals(0, removalListener.evictionsMetric.count()); + assertEquals(numOfItems1, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(0, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); // Try to hit cache again with some randomization. int numOfItems2 = randomIntBetween(1, onHeapCacheSize / 2 - 1); @@ -105,6 +112,13 @@ public void testComputeIfAbsentWithoutAnyOnHeapCacheEviction() throws Exception } } assertEquals(0, removalListener.evictionsMetric.count()); + assertEquals(cacheHit, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(numOfItems1 + cacheMiss, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + + assertEquals(0, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); + assertEquals(numOfItems1 + cacheMiss, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); } public void testComputeIfAbsentWithFactoryBasedCacheCreation() throws Exception { @@ -172,12 +186,25 @@ public void testComputeIfAbsentWithFactoryBasedCacheCreation() throws Exception LoadAwareCacheLoader, String> tieredCacheLoader = getLoadAwareCacheLoader(); tieredSpilloverCache.computeIfAbsent(getICacheKey(key), tieredCacheLoader); } + + int expectedDiskEntries = numOfItems1 - onHeapCacheSize; tieredSpilloverCache.getOnHeapCache().keys().forEach(onHeapKeys::add); tieredSpilloverCache.getDiskCache().keys().forEach(diskTierKeys::add); - // Verify on heap cache size. + // Verify on heap cache stats. assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnHeapCache().count()); - // Verify disk cache size. - assertEquals(numOfItems1 - onHeapCacheSize, tieredSpilloverCache.getDiskCache().count()); + assertEquals(onHeapCacheSize, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(numOfItems1, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(0, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(expectedDiskEntries, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(onHeapCacheSize * keyValueSize, getSizeInBytesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + + // Verify disk cache stats. 
+ assertEquals(expectedDiskEntries, tieredSpilloverCache.getDiskCache().count()); + assertEquals(expectedDiskEntries, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); + assertEquals(0, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); + assertEquals(numOfItems1, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); + assertEquals(expectedDiskEntries * keyValueSize, getSizeInBytesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); } public void testWithFactoryCreationWithOnHeapCacheNotPresent() { @@ -324,6 +351,15 @@ public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); } + long actualDiskCacheSize = tieredSpilloverCache.getDiskCache().count(); + + assertEquals(numOfItems1, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(0, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(actualDiskCacheSize, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(onHeapCacheSize, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(onHeapCacheSize * keyValueSize, getSizeInBytesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(actualDiskCacheSize, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); + tieredSpilloverCache.getOnHeapCache().keys().forEach(onHeapKeys::add); tieredSpilloverCache.getDiskCache().keys().forEach(diskTierKeys::add); @@ -353,6 +389,11 @@ public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { tieredSpilloverCache.computeIfAbsent(getICacheKey(UUID.randomUUID().toString()), tieredCacheLoader); cacheMiss++; } + + assertEquals(numOfItems1 + cacheMiss + diskCacheHit, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(onHeapCacheHit, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(cacheMiss + numOfItems1, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); + assertEquals(diskCacheHit, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); } public void testComputeIfAbsentWithEvictionsFromTieredCache() throws Exception { @@ -382,8 +423,13 @@ public void testComputeIfAbsentWithEvictionsFromTieredCache() throws Exception { tieredSpilloverCache.computeIfAbsent(getICacheKey(UUID.randomUUID().toString()), tieredCacheLoader); } - int evictions = numOfItems - (totalSize); + int evictions = numOfItems - (totalSize); // Evictions from the cache as a whole assertEquals(evictions, removalListener.evictionsMetric.count()); + assertEquals(evictions, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); + assertEquals( + evictions + getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK), + getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP) + ); } public void testGetAndCount() throws Exception { @@ -439,7 +485,7 @@ public void testGetAndCount() throws Exception { assertEquals(numOfItems1, tieredSpilloverCache.count()); } - public void testPut() { + public void testPut() throws Exception { int onHeapCacheSize = randomIntBetween(10, 30); int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); int keyValueSize = 50; @@ -462,6 +508,8 @@ public void testPut() { ICacheKey key = getICacheKey(UUID.randomUUID().toString()); 
String value = UUID.randomUUID().toString(); tieredSpilloverCache.put(key, value); + assertEquals(1, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(1, tieredSpilloverCache.count()); } public void testPutAndVerifyNewItemsArePresentOnHeapCache() throws Exception { @@ -494,6 +542,9 @@ public void testPutAndVerifyNewItemsArePresentOnHeapCache() throws Exception { tieredSpilloverCache.computeIfAbsent(getICacheKey(UUID.randomUUID().toString()), getLoadAwareCacheLoader()); } + assertEquals(onHeapCacheSize, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(0, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); + // Again try to put OnHeap cache capacity amount of new items. List> newKeyList = new ArrayList<>(); for (int i = 0; i < onHeapCacheSize; i++) { @@ -512,9 +563,11 @@ public void testPutAndVerifyNewItemsArePresentOnHeapCache() throws Exception { for (int i = 0; i < actualOnHeapCacheKeys.size(); i++) { assertTrue(newKeyList.contains(actualOnHeapCacheKeys.get(i))); } + assertEquals(onHeapCacheSize, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(onHeapCacheSize, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); } - public void testInvalidate() { + public void testInvalidate() throws Exception { int onHeapCacheSize = 1; int diskCacheSize = 10; int keyValueSize = 20; @@ -538,11 +591,12 @@ public void testInvalidate() { String value = UUID.randomUUID().toString(); // First try to invalidate without the key present in cache. tieredSpilloverCache.invalidate(key); - // assertEquals(0, tieredSpilloverCache.stats().getEvictionsByDimensions(HEAP_DIMS)); + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); // Now try to invalidate with the key present in onHeap cache. tieredSpilloverCache.put(key, value); tieredSpilloverCache.invalidate(key); + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); // Evictions metric shouldn't increase for invalidations. assertEquals(0, tieredSpilloverCache.count()); @@ -552,11 +606,15 @@ public void testInvalidate() { tieredSpilloverCache.put(key2, UUID.randomUUID().toString()); assertEquals(2, tieredSpilloverCache.count()); + assertEquals(1, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(1, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); // Again invalidate older key, leaving one in heap tier and zero in disk tier tieredSpilloverCache.invalidate(key); + assertEquals(0, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); + assertEquals(0, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); + assertEquals(1, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); assertEquals(1, tieredSpilloverCache.count()); - } public void testCacheKeys() throws Exception { @@ -792,7 +850,7 @@ public void testConcurrencyForEvictionFlowFromOnHeapToDiskTier() throws Exceptio // Put first key on tiered cache. Will go into onHeap cache. tieredSpilloverCache.computeIfAbsent(keyToBeEvicted, getLoadAwareCacheLoader()); - // assertEquals(1, tieredSpilloverCache.stats().getEntriesByDimensions(HEAP_DIMS)); + assertEquals(1, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); CountDownLatch countDownLatch = new CountDownLatch(1); CountDownLatch countDownLatch1 = new CountDownLatch(1); // Put second key on tiered cache. 
Will cause eviction of first key from onHeap cache and should go into @@ -830,6 +888,10 @@ public void testConcurrencyForEvictionFlowFromOnHeapToDiskTier() throws Exceptio assertEquals(1, tieredSpilloverCache.getOnHeapCache().count()); assertEquals(1, onDiskCache.count()); + + assertEquals(1, getEvictionsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(1, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(1, getEntriesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK)); assertNotNull(onDiskCache.get(keyToBeEvicted)); } @@ -1136,4 +1198,39 @@ private TieredSpilloverCache intializeTieredSpilloverCache( } return builder.build(); } + + // Helper functions for extracting tier aggregated stats. + private long getHitsForTier(TieredSpilloverCache tsc, String tierValue) throws IOException { + return getStatsSnapshotForTier(tsc, tierValue).getHits(); + } + + private long getMissesForTier(TieredSpilloverCache tsc, String tierValue) throws IOException { + return getStatsSnapshotForTier(tsc, tierValue).getMisses(); + } + + private long getEvictionsForTier(TieredSpilloverCache tsc, String tierValue) throws IOException { + return getStatsSnapshotForTier(tsc, tierValue).getEvictions(); + } + + private long getSizeInBytesForTier(TieredSpilloverCache tsc, String tierValue) throws IOException { + return getStatsSnapshotForTier(tsc, tierValue).getSizeInBytes(); + } + + private long getEntriesForTier(TieredSpilloverCache tsc, String tierValue) throws IOException { + return getStatsSnapshotForTier(tsc, tierValue).getEntries(); + } + + private ImmutableCacheStats getStatsSnapshotForTier(TieredSpilloverCache tsc, String tierValue) throws IOException { + ImmutableCacheStatsHolder cacheStats = tsc.stats(); + // Since we always use the same list of dimensions from getMockDimensions() in keys for these tests, we can get all the stats values + // for a given tier with a single node in MDCS + List mockDimensions = getMockDimensions(); + mockDimensions.add(tierValue); + ImmutableCacheStats snapshot = cacheStats.getStatsForDimensionValues(mockDimensions); + if (snapshot == null) { + return new ImmutableCacheStats(0, 0, 0, 0, 0); // This can happen if no cache actions have happened for this set of + // dimensions yet + } + return snapshot; + } } From 2ca803634a0c719a72004f286321f73611810735 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 15 Apr 2024 13:00:50 -0700 Subject: [PATCH 03/20] Fixed bug in TSC invalidate() Signed-off-by: Peter Alfonsi --- .../opensearch/cache/common/tier/TieredSpilloverCache.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 282c593f17544..5698afba88abb 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -188,7 +188,9 @@ public void invalidate(ICacheKey key) { List dimensionValues = addTierValueToDimensionValues(key.dimensions, pair.v2()); statsHolder.removeDimensions(dimensionValues); } - pair.v1().invalidate(key); + if (key.key != null) { + pair.v1().invalidate(key); + } } } } From 3f827e9e4f12be0d6b7f3693ba82f88280bd517e Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Tue, 16 Apr 2024 09:36:23 -0700 Subject: [PATCH 04/20] Changelog 
Signed-off-by: Peter Alfonsi --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 88a8f57c0afdc..d7ea8d1e352ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Tiered Caching] Add dimension-based stats to ICache implementations. ([#12531](https://github.com/opensearch-project/OpenSearch/pull/12531)) - Add changes for overriding remote store and replication settings during snapshot restore. ([#11868](https://github.com/opensearch-project/OpenSearch/pull/11868)) - Add an individual setting of rate limiter for segment replication ([#12959](https://github.com/opensearch-project/OpenSearch/pull/12959)) +- [Tiered Caching] Add dimension-based stats to TieredSpilloverCache ([#13236](https://github.com/opensearch-project/OpenSearch/pull/13236)) ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) From 298358f8c37c9b70403cc2727f44c42357369334 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Tue, 16 Apr 2024 11:50:42 -0700 Subject: [PATCH 05/20] Javadoc Signed-off-by: Peter Alfonsi --- .../opensearch/cache/common/tier/TieredSpilloverCache.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 5698afba88abb..dab40e22c1a96 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -82,8 +82,14 @@ public class TieredSpilloverCache implements ICache { private final List> policies; // Common values used for tier dimension + + /** The name for the tier dimension. */ public static final String TIER_DIMENSION_NAME = "tier"; + + /** Dimension value for on-heap cache, like OpenSearchOnHeapCache.*/ public static final String TIER_DIMENSION_VALUE_ON_HEAP = "on_heap"; + + /** Dimension value for on-disk cache, like EhcacheDiskCache. 
*/ public static final String TIER_DIMENSION_VALUE_DISK = "disk"; TieredSpilloverCache(Builder builder) { From fade204dd386886838de1146fc3475bc6a2f54a4 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Thu, 18 Apr 2024 11:54:39 -0700 Subject: [PATCH 06/20] Streamlined removal listener logic + renamed fn Signed-off-by: Peter Alfonsi --- .../common/tier/TieredSpilloverCache.java | 38 +++++++------------ 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index dab40e22c1a96..51ce2436593a0 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -60,10 +60,11 @@ public class TieredSpilloverCache implements ICache { private final ICache diskCache; private final ICache onHeapCache; + // Removal listeners for the individual tiers private final RemovalListener, V> onDiskRemovalListener; private final RemovalListener, V> onHeapRemovalListener; - // The listener for removals from the spillover cache as a whole + // Removal listener from the spillover cache as a whole private final RemovalListener, V> removalListener; // In future we want to just read the stats from the individual tiers' statsHolder objects, but this isn't @@ -134,7 +135,7 @@ public class TieredSpilloverCache implements ICache { new Tuple<>(diskCache, TIER_DIMENSION_VALUE_DISK) ); // Pass "tier" as the innermost dimension name, in addition to whatever dimensions are specified for the cache as a whole - this.statsHolder = new CacheStatsHolder(addTierValueToDimensionValues(dimensionNames, TIER_DIMENSION_NAME)); + this.statsHolder = new CacheStatsHolder(getDimensionsWithTierValue(dimensionNames, TIER_DIMENSION_NAME)); this.policies = builder.policies; // Will never be null; builder initializes it to an empty list } @@ -191,7 +192,7 @@ public void invalidate(ICacheKey key) { try (ReleasableLock ignore = writeLock.acquire()) { for (Tuple, String> pair : cacheAndTierValueList) { if (key.getDropStatsForDimensions()) { - List dimensionValues = addTierValueToDimensionValues(key.dimensions, pair.v2()); + List dimensionValues = getDimensionsWithTierValue(key.dimensions, pair.v2()); statsHolder.removeDimensions(dimensionValues); } if (key.key != null) { @@ -254,7 +255,7 @@ private Function, V> getValueFromTieredCache() { for (Tuple, String> pair : cacheAndTierValueList) { V value = pair.v1().get(key); // Get the tier value corresponding to this cache - List dimensionValues = addTierValueToDimensionValues(key.dimensions, pair.v2()); + List dimensionValues = getDimensionsWithTierValue(key.dimensions, pair.v2()); if (value != null) { statsHolder.incrementHits(dimensionValues); return value; @@ -269,22 +270,14 @@ private Function, V> getValueFromTieredCache() { void handleRemovalFromHeapTier(RemovalNotification, V> notification) { ICacheKey key = notification.getKey(); - boolean wasEvicted = false; - if (RemovalReason.EVICTED.equals(notification.getRemovalReason()) - || RemovalReason.CAPACITY.equals(notification.getRemovalReason())) { + if (SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()) && evaluatePolicies(notification.getValue())) { try (ReleasableLock ignore = writeLock.acquire()) { - if (evaluatePolicies(notification.getValue())) { - diskCache.put(key, 
notification.getValue()); // spill over to the disk tier and increment its stats - updateStatsOnPut(TIER_DIMENSION_VALUE_DISK, key, notification.getValue()); - } else { - removalListener.onRemoval(notification); // The value is leaving the TSC entirely if it doesn't enter disk cache - } + diskCache.put(key, notification.getValue()); // spill over to the disk tier and increment its stats + updateStatsOnPut(TIER_DIMENSION_VALUE_DISK, key, notification.getValue()); } wasEvicted = true; - } - - else { + } else { // If the removal was for another reason, send this notification to the TSC's removal listener, as the value is leaving the TSC // entirely removalListener.onRemoval(notification); @@ -295,17 +288,12 @@ void handleRemovalFromHeapTier(RemovalNotification, V> notification void handleRemovalFromDiskTier(RemovalNotification, V> notification) { // Values removed from the disk tier leave the TSC entirely removalListener.onRemoval(notification); - - boolean wasEvicted = false; - if (RemovalReason.EVICTED.equals(notification.getRemovalReason()) - || RemovalReason.CAPACITY.equals(notification.getRemovalReason())) { - wasEvicted = true; - } + boolean wasEvicted = SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()); updateStatsOnRemoval(TIER_DIMENSION_VALUE_DISK, wasEvicted, notification.getKey(), notification.getValue()); } void updateStatsOnRemoval(String removedFromTierValue, boolean wasEvicted, ICacheKey key, V value) { - List dimensionValues = addTierValueToDimensionValues(key.dimensions, removedFromTierValue); + List dimensionValues = getDimensionsWithTierValue(key.dimensions, removedFromTierValue); if (wasEvicted) { statsHolder.incrementEvictions(dimensionValues); } @@ -314,7 +302,7 @@ void updateStatsOnRemoval(String removedFromTierValue, boolean wasEvicted, ICach } void updateStatsOnPut(String destinationTierValue, ICacheKey key, V value) { - List dimensionValues = addTierValueToDimensionValues(key.dimensions, destinationTierValue); + List dimensionValues = getDimensionsWithTierValue(key.dimensions, destinationTierValue); statsHolder.incrementEntries(dimensionValues); statsHolder.incrementSizeInBytes(dimensionValues, weigher.applyAsLong(key, value)); } @@ -322,7 +310,7 @@ void updateStatsOnPut(String destinationTierValue, ICacheKey key, V value) { /** * Add tierValue to the end of a copy of the initial dimension values. 
*/ - private List addTierValueToDimensionValues(List initialDimensions, String tierValue) { + private List getDimensionsWithTierValue(List initialDimensions, String tierValue) { List result = new ArrayList<>(initialDimensions); result.add(tierValue); return result; From 45ca6d4b028276ac56ad5279377ad639c5f386e4 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Thu, 18 Apr 2024 12:27:13 -0700 Subject: [PATCH 07/20] Added TODO comment Signed-off-by: Peter Alfonsi --- .../org/opensearch/cache/common/tier/TieredSpilloverCache.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 51ce2436593a0..e77f440aa0627 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -102,6 +102,8 @@ public class TieredSpilloverCache implements ICache { this.onDiskRemovalListener = new DiskTierRemovalListener(this); this.weigher = Objects.requireNonNull(builder.cacheConfig.getWeigher(), "Weigher can't be null"); + // TODO: Once the feature flag/NoopCacheStatsHolder PR has gone in, use NoopCacheStatsHolder for the tiers, + // to avoid storing redundant stats values. this.onHeapCache = builder.onHeapCacheFactory.create( new CacheConfig.Builder().setRemovalListener(onHeapRemovalListener) .setKeyType(builder.cacheConfig.getKeyType()) From dddeab77d80e077b7e96c3e955f8b82da497ced0 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Wed, 24 Apr 2024 15:28:03 -0700 Subject: [PATCH 08/20] Adds useNoopStats logic to CacheConfig + builder Signed-off-by: Peter Alfonsi --- .../TieredSpilloverCacheIT.java | 2 +- .../common/tier/TieredSpilloverCache.java | 4 +- .../cache/common/tier/MockDiskCache.java | 26 ++++++++-- .../tier/TieredSpilloverCacheTests.java | 48 +++++++++++++++---- .../cache/store/disk/EhcacheDiskCache.java | 11 ++++- .../store/disk/EhCacheDiskCacheTests.java | 32 +++++++++++++ .../cache/store/OpenSearchOnHeapCache.java | 12 +++-- .../cache/store/builders/ICacheBuilder.java | 11 +++++ .../cache/store/config/CacheConfig.java | 13 +++++ .../store/OpenSearchOnHeapCacheTests.java | 33 ++++++++----- 10 files changed, 159 insertions(+), 33 deletions(-) diff --git a/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java b/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java index cbe16a690c104..bfc184cff0566 100644 --- a/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java +++ b/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java @@ -550,7 +550,7 @@ public MockDiskCachePlugin() {} @Override public Map getCacheFactoryMap() { - return Map.of(MockDiskCache.MockDiskCacheFactory.NAME, new MockDiskCache.MockDiskCacheFactory(0, 1000)); + return Map.of(MockDiskCache.MockDiskCacheFactory.NAME, new MockDiskCache.MockDiskCacheFactory(0, 1000, false)); } @Override diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 58582cc65db87..7fb4866bbddfb 100644 --- 
a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -108,8 +108,6 @@ public class TieredSpilloverCache implements ICache { this.onDiskRemovalListener = new DiskTierRemovalListener(this); this.weigher = Objects.requireNonNull(builder.cacheConfig.getWeigher(), "Weigher can't be null"); - // TODO: Once the feature flag/NoopCacheStatsHolder PR has gone in, use NoopCacheStatsHolder for the tiers, - // to avoid storing redundant stats values. this.onHeapCache = builder.onHeapCacheFactory.create( new CacheConfig.Builder().setRemovalListener(onHeapRemovalListener) .setKeyType(builder.cacheConfig.getKeyType()) @@ -120,6 +118,7 @@ public class TieredSpilloverCache implements ICache { .setMaxSizeInBytes(builder.cacheConfig.getMaxSizeInBytes()) .setExpireAfterAccess(builder.cacheConfig.getExpireAfterAccess()) .setClusterSettings(builder.cacheConfig.getClusterSettings()) + .setUseNoopStats(true) .build(), builder.cacheType, builder.cacheFactories @@ -132,6 +131,7 @@ public class TieredSpilloverCache implements ICache { .setSettings(builder.cacheConfig.getSettings()) .setWeigher(builder.cacheConfig.getWeigher()) .setDimensionNames(builder.cacheConfig.getDimensionNames()) + .setUseNoopStats(true) .build(), builder.cacheType, builder.cacheFactories diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java index 522eabd39c3c9..220bd71009040 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java @@ -16,11 +16,15 @@ import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.RemovalReason; import org.opensearch.common.cache.serializer.Serializer; +import org.opensearch.common.cache.stats.CacheStatsHolder; +import org.opensearch.common.cache.stats.DefaultCacheStatsHolder; import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; +import org.opensearch.common.cache.stats.NoopCacheStatsHolder; import org.opensearch.common.cache.store.builders.ICacheBuilder; import org.opensearch.common.cache.store.config.CacheConfig; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.NoSuchElementException; import java.util.concurrent.ConcurrentHashMap; @@ -32,12 +36,18 @@ public class MockDiskCache implements ICache { long delay; private final RemovalListener, V> removalListener; + private final CacheStatsHolder statsHolder; // Only update for number of entries; this is only used to test useNoopStats logic in TSC - public MockDiskCache(int maxSize, long delay, RemovalListener, V> removalListener) { + public MockDiskCache(int maxSize, long delay, RemovalListener, V> removalListener, boolean useNoopStats) { this.maxSize = maxSize; this.delay = delay; this.removalListener = removalListener; this.cache = new ConcurrentHashMap, V>(); + if (useNoopStats) { + this.statsHolder = NoopCacheStatsHolder.getInstance(); + } else { + this.statsHolder = new DefaultCacheStatsHolder(List.of()); + } } @Override @@ -50,6 +60,7 @@ public V get(ICacheKey key) { public void put(ICacheKey key, V value) { if (this.cache.size() >= maxSize) { // For simplification this.removalListener.onRemoval(new RemovalNotification<>(key, value, RemovalReason.EVICTED)); + 
this.statsHolder.decrementEntries(List.of()); } try { Thread.sleep(delay); @@ -57,6 +68,7 @@ public void put(ICacheKey key, V value) { throw new RuntimeException(e); } this.cache.put(key, value); + this.statsHolder.incrementEntries(List.of()); } @Override @@ -97,7 +109,9 @@ public void refresh() {} @Override public ImmutableCacheStatsHolder stats() { - return null; + // To allow testing of useNoopStats logic in TSC, return a dummy ImmutableCacheStatsHolder with the + // right number of entries, unless useNoopStats is true + return statsHolder.getImmutableCacheStatsHolder(); } @Override @@ -110,10 +124,12 @@ public static class MockDiskCacheFactory implements Factory { public static final String NAME = "mockDiskCache"; final long delay; final int maxSize; + final boolean useNoopStats; - public MockDiskCacheFactory(long delay, int maxSize) { + public MockDiskCacheFactory(long delay, int maxSize, boolean useNoopStats) { this.delay = delay; this.maxSize = maxSize; + this.useNoopStats = useNoopStats; } @Override @@ -124,6 +140,7 @@ public ICache create(CacheConfig config, CacheType cacheType, .setMaxSize(maxSize) .setDeliberateDelay(delay) .setRemovalListener(config.getRemovalListener()) + .setUseNoopStats(config.getUseNoopStats()) .build(); } @@ -142,7 +159,8 @@ public static class Builder extends ICacheBuilder { @Override public ICache build() { - return new MockDiskCache(this.maxSize, this.delay, this.getRemovalListener()); + boolean useNoopStats = getUseNoopStats(); + return new MockDiskCache(this.maxSize, this.delay, this.getRemovalListener(), getUseNoopStats()); } public Builder setMaxSize(int maxSize) { diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index 68234adc37992..5e08ca5f3a11f 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -174,7 +174,7 @@ public void testComputeIfAbsentWithFactoryBasedCacheCreation() throws Exception OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(), MockDiskCache.MockDiskCacheFactory.NAME, - new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300)) + new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300), false) ) ); @@ -249,7 +249,7 @@ public void testWithFactoryCreationWithOnHeapCacheNotPresent() { OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(), MockDiskCache.MockDiskCacheFactory.NAME, - new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300)) + new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300), false) ) ) ); @@ -294,7 +294,7 @@ public void testWithFactoryCreationWithDiskCacheNotPresent() { OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(), MockDiskCache.MockDiskCacheFactory.NAME, - new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300)) + new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300), false) ) ) ); @@ -334,7 +334,7 @@ public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { .setClusterSettings(clusterSettings) .build(); - ICache.Factory mockDiskCacheFactory = new MockDiskCache.MockDiskCacheFactory(0, diskCacheSize); + 
ICache.Factory mockDiskCacheFactory = new MockDiskCache.MockDiskCacheFactory(0, diskCacheSize, false); TieredSpilloverCache tieredSpilloverCache = new TieredSpilloverCache.Builder() .setOnHeapCacheFactory(onHeapCacheFactory) @@ -818,7 +818,7 @@ public void testConcurrencyForEvictionFlowFromOnHeapToDiskTier() throws Exceptio MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(); - ICache.Factory diskCacheFactory = new MockDiskCache.MockDiskCacheFactory(500, diskCacheSize); + ICache.Factory diskCacheFactory = new MockDiskCache.MockDiskCacheFactory(500, diskCacheSize, false); CacheConfig cacheConfig = new CacheConfig.Builder().setKeyType(String.class) .setKeyType(String.class) .setWeigher((k, v) -> 150) @@ -1023,7 +1023,7 @@ public CachedQueryResult.PolicyValues apply(String s) { OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(), MockDiskCache.MockDiskCacheFactory.NAME, - new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300)) + new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300), false) ) ); @@ -1115,7 +1115,6 @@ public void testGetPutAndInvalidateWithDiskCacheDisabled() throws Exception { int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); int keyValueSize = 50; int totalSize = onHeapCacheSize + diskCacheSize; - MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache( keyValueSize, @@ -1184,6 +1183,39 @@ public void testGetPutAndInvalidateWithDiskCacheDisabled() throws Exception { assertEquals(0, tieredSpilloverCache.count()); } + public void testTiersDoNotTrackStats() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int keyValueSize = 50; + MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache( + keyValueSize, + diskCacheSize, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), + 0 + ); + + // do some gets to put entries in both tiers + int numMisses = onHeapCacheSize + randomIntBetween(10, 20); + for (int iter = 0; iter < numMisses; iter++) { + ICacheKey key = getICacheKey(UUID.randomUUID().toString()); + LoadAwareCacheLoader, String> tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); + } + assertNotEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), tieredSpilloverCache.stats().getTotalStats()); + assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), tieredSpilloverCache.getOnHeapCache().stats().getTotalStats()); + ImmutableCacheStats diskStats = tieredSpilloverCache.getDiskCache().stats().getTotalStats(); + assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), diskStats); + } + private List getMockDimensions() { List dims = new ArrayList<>(); for (String dimensionName : dimensionNames) { @@ -1299,7 +1331,7 @@ private TieredSpilloverCache intializeTieredSpilloverCache( ) .setClusterSettings(clusterSettings) .build(); - ICache.Factory mockDiskCacheFactory = new MockDiskCache.MockDiskCacheFactory(diskDeliberateDelay, diskCacheSize); + ICache.Factory 
mockDiskCacheFactory = new MockDiskCache.MockDiskCacheFactory(diskDeliberateDelay, diskCacheSize, false); TieredSpilloverCache.Builder builder = new TieredSpilloverCache.Builder().setCacheType( CacheType.INDICES_REQUEST_CACHE diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index eea13ce70ccb5..9bbff1a7ef46c 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -27,6 +27,7 @@ import org.opensearch.common.cache.stats.CacheStatsHolder; import org.opensearch.common.cache.stats.DefaultCacheStatsHolder; import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; +import org.opensearch.common.cache.stats.NoopCacheStatsHolder; import org.opensearch.common.cache.store.builders.ICacheBuilder; import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.collect.Tuple; @@ -163,8 +164,13 @@ private EhcacheDiskCache(Builder builder) { this.ehCacheEventListener = new EhCacheEventListener(builder.getRemovalListener(), builder.getWeigher()); this.cache = buildCache(Duration.ofMillis(expireAfterAccess.getMillis()), builder); List dimensionNames = Objects.requireNonNull(builder.dimensionNames, "Dimension names can't be null"); - // If this cache is being used, FeatureFlags.PLUGGABLE_CACHE is already on, so we can always use the DefaultCacheStatsHolder. - this.cacheStatsHolder = new DefaultCacheStatsHolder(dimensionNames); + if (builder.getUseNoopStats()) { + this.cacheStatsHolder = NoopCacheStatsHolder.getInstance(); + } else { + // If this cache is being used, FeatureFlags.PLUGGABLE_CACHE is already on, so we can always use the DefaultCacheStatsHolder + // unless useNoopStats is explicitly set in CacheConfig. + this.cacheStatsHolder = new DefaultCacheStatsHolder(dimensionNames); + } } @SuppressWarnings({ "rawtypes" }) @@ -414,6 +420,7 @@ public Iterable> keys() { /** * Gives the current count of keys in disk cache. + * If useNoopStats is set to true in the builder, always returns 0. 
* @return current count of keys */ @Override diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java index 06ebed08d7525..b8a12e6475075 100644 --- a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java @@ -849,6 +849,38 @@ public void testInvalidateWithDropDimensions() throws Exception { } } + public void testNoopStats() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener removalListener = new MockRemovalListener<>(); + ToLongBiFunction, String> weigher = getWeigher(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache ehcacheTest = new EhcacheDiskCache.Builder().setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setDimensionNames(List.of(dimensionName)) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .setWeigher(weigher) + .setUseNoopStats(true) + .build(); + int randomKeys = randomIntBetween(10, 100); + for (int i = 0; i < randomKeys; i++) { + ICacheKey iCacheKey = getICacheKey(UUID.randomUUID().toString()); + ehcacheTest.put(iCacheKey, UUID.randomUUID().toString()); + assertEquals(0, ehcacheTest.count()); // Expect count of 0 if NoopCacheStatsHolder is used + assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), ehcacheTest.stats().getTotalStats()); + } + ehcacheTest.close(); + } + } + private List getRandomDimensions(List dimensionNames) { Random rand = Randomness.get(); int bound = 3; diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java index 35c951e240a3a..64dad9c36b14b 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java +++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java @@ -53,6 +53,7 @@ public class OpenSearchOnHeapCache implements ICache, RemovalListene private final RemovalListener, V> removalListener; private final List dimensionNames; private final ToLongBiFunction, V> weigher; + private final boolean useNoopStats; public OpenSearchOnHeapCache(Builder builder) { CacheBuilder, V> cacheBuilder = CacheBuilder., V>builder() @@ -64,8 +65,7 @@ public OpenSearchOnHeapCache(Builder builder) { } cache = cacheBuilder.build(); this.dimensionNames = Objects.requireNonNull(builder.dimensionNames, "Dimension names can't be null"); - // Use noop stats when pluggable caching is off - boolean useNoopStats = !FeatureFlags.PLUGGABLE_CACHE_SETTING.get(builder.getSettings()); + this.useNoopStats = builder.getUseNoopStats(); if (useNoopStats) { this.cacheStatsHolder = NoopCacheStatsHolder.getInstance(); } else { @@ -171,8 +171,9 @@ public static class OpenSearchOnHeapCacheFactory implements Factory { public ICache create(CacheConfig config, CacheType cacheType, Map cacheFactories) { Map> settingList = 
OpenSearchOnHeapCacheSettings.getSettingListForCacheType(cacheType); Settings settings = config.getSettings(); + boolean useNoopStats = useNoopStats(config.getSettings(), config.getUseNoopStats()); ICacheBuilder builder = new Builder().setDimensionNames(config.getDimensionNames()) - .setSettings(config.getSettings()) + .setUseNoopStats(useNoopStats) .setMaximumWeightInBytes(((ByteSizeValue) settingList.get(MAXIMUM_SIZE_IN_BYTES_KEY).get(settings)).getBytes()) .setExpireAfterAccess(((TimeValue) settingList.get(EXPIRE_AFTER_ACCESS_KEY).get(settings))) .setWeigher(config.getWeigher()) @@ -193,6 +194,11 @@ public ICache create(CacheConfig config, CacheType cacheType, public String getCacheName() { return NAME; } + + private boolean useNoopStats(Settings settings, boolean configUseNoopStats) { + // Use noop stats when pluggable caching is off, or when explicitly set in the CacheConfig + return !FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || configUseNoopStats; + } } /** diff --git a/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java b/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java index ac90fcc85ffef..66c7aa9e709d7 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java +++ b/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java @@ -37,6 +37,8 @@ public abstract class ICacheBuilder { private RemovalListener, V> removalListener; + private boolean useNoopStats; + public ICacheBuilder() {} public ICacheBuilder setMaximumWeightInBytes(long sizeInBytes) { @@ -64,6 +66,11 @@ public ICacheBuilder setRemovalListener(RemovalListener, V> r return this; } + public ICacheBuilder setUseNoopStats(boolean useNoopStats) { + this.useNoopStats = useNoopStats; + return this; + } + public long getMaxWeightInBytes() { return maxWeightInBytes; } @@ -84,5 +91,9 @@ public Settings getSettings() { return settings; } + public boolean getUseNoopStats() { + return useNoopStats; + } + public abstract ICache build(); } diff --git a/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java b/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java index 15cbdbd021d71..60d524ee51401 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java +++ b/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java @@ -68,6 +68,8 @@ public class CacheConfig { private final ClusterSettings clusterSettings; + private final boolean useNoopStats; + private CacheConfig(Builder builder) { this.keyType = builder.keyType; this.valueType = builder.valueType; @@ -81,6 +83,7 @@ private CacheConfig(Builder builder) { this.maxSizeInBytes = builder.maxSizeInBytes; this.expireAfterAccess = builder.expireAfterAccess; this.clusterSettings = builder.clusterSettings; + this.useNoopStats = builder.useNoopStats; } public Class getKeyType() { @@ -131,6 +134,10 @@ public ClusterSettings getClusterSettings() { return clusterSettings; } + public boolean getUseNoopStats() { + return useNoopStats; + } + /** * Builder class to build Cache config related parameters. * @param Type of key. 
@@ -155,6 +162,7 @@ public static class Builder { private TimeValue expireAfterAccess; private ClusterSettings clusterSettings; + private boolean useNoopStats; public Builder() {} @@ -218,6 +226,11 @@ public Builder setClusterSettings(ClusterSettings clusterSettings) { return this; } + public Builder setUseNoopStats(boolean useNoopStats) { + this.useNoopStats = useNoopStats; + return this; + } + public CacheConfig build() { return new CacheConfig<>(this); } diff --git a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java index 00dbf43bc37be..9a999ccc7a2c5 100644 --- a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java +++ b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java @@ -39,7 +39,7 @@ public void testStats() throws Exception { MockRemovalListener listener = new MockRemovalListener<>(); int maxKeys = between(10, 50); int numEvicted = between(10, 20); - OpenSearchOnHeapCache cache = getCache(maxKeys, listener, true); + OpenSearchOnHeapCache cache = getCache(maxKeys, listener, true, false); // When the pluggable caches setting is on, we should get stats as expected from cache.stats(). @@ -82,21 +82,26 @@ public void testStats() throws Exception { } public void testStatsWithoutPluggableCaches() throws Exception { - // When the pluggable caches setting is off, we should get all-zero stats from cache.stats(), but count() should still work. + // When the pluggable caches setting is off, or when we manually set useNoopStats = true in the config, + // we should get all-zero stats from cache.stats(), but count() should still work. MockRemovalListener listener = new MockRemovalListener<>(); int maxKeys = between(10, 50); int numEvicted = between(10, 20); - OpenSearchOnHeapCache cache = getCache(maxKeys, listener, false); - List> keysAdded = new ArrayList<>(); - int numAdded = maxKeys + numEvicted; - for (int i = 0; i < numAdded; i++) { - ICacheKey key = getICacheKey(UUID.randomUUID().toString()); - keysAdded.add(key); - cache.computeIfAbsent(key, getLoadAwareCacheLoader()); + OpenSearchOnHeapCache pluggableCachesOffCache = getCache(maxKeys, listener, false, false); + OpenSearchOnHeapCache manuallySetNoopStatsCache = getCache(maxKeys, listener, true, true); + List> caches = List.of(pluggableCachesOffCache, manuallySetNoopStatsCache); - assertEquals(Math.min(maxKeys, i + 1), cache.count()); - assertZeroStats(cache.stats()); + for (OpenSearchOnHeapCache cache : caches) { + int numAdded = maxKeys + numEvicted; + for (int i = 0; i < numAdded; i++) { + ICacheKey key = getICacheKey(UUID.randomUUID().toString()); + cache.computeIfAbsent(key, getLoadAwareCacheLoader()); + + assertEquals(Math.min(maxKeys, i + 1), cache.count()); + ImmutableCacheStatsHolder stats = cache.stats(); + assertZeroStats(cache.stats()); + } } } @@ -107,7 +112,8 @@ private void assertZeroStats(ImmutableCacheStatsHolder stats) { private OpenSearchOnHeapCache getCache( int maxSizeKeys, MockRemovalListener listener, - boolean pluggableCachesSetting + boolean pluggableCachesSetting, + boolean useNoopStatsInConfig ) { ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(); Settings settings = Settings.builder() @@ -127,6 +133,7 @@ private OpenSearchOnHeapCache getCache( .setSettings(settings) .setDimensionNames(dimensionNames) .setMaxSizeInBytes(maxSizeKeys * keyValueSize) + 
.setUseNoopStats(useNoopStatsInConfig) .build(); return (OpenSearchOnHeapCache) onHeapCacheFactory.create(cacheConfig, CacheType.INDICES_REQUEST_CACHE, null); } @@ -134,7 +141,7 @@ private OpenSearchOnHeapCache getCache( public void testInvalidateWithDropDimensions() throws Exception { MockRemovalListener listener = new MockRemovalListener<>(); int maxKeys = 50; - OpenSearchOnHeapCache cache = getCache(maxKeys, listener, true); + OpenSearchOnHeapCache cache = getCache(maxKeys, listener, true, false); List> keysAdded = new ArrayList<>(); From 353bc71b5dd7d4e944f66c52de70a12e8a4a8f8d Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Fri, 26 Apr 2024 11:53:34 -0700 Subject: [PATCH 09/20] Fixed tier stats adding issue Signed-off-by: Peter Alfonsi --- .../common/tier/TieredSpilloverCache.java | 36 +---- .../tier/TieredSpilloverCacheStatsHolder.java | 145 ++++++++++++++++++ .../tier/TieredSpilloverCacheTests.java | 129 ++++++++++++---- .../cache/stats/DefaultCacheStatsHolder.java | 42 ++--- 4 files changed, 270 insertions(+), 82 deletions(-) create mode 100644 modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 7fb4866bbddfb..bfd8e5416fb0d 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -18,8 +18,6 @@ import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.RemovalReason; import org.opensearch.common.cache.policy.CachedQueryResult; -import org.opensearch.common.cache.stats.CacheStatsHolder; -import org.opensearch.common.cache.stats.DefaultCacheStatsHolder; import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.collect.Tuple; @@ -44,6 +42,8 @@ import java.util.function.ToLongBiFunction; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP; /** * This cache spillover the evicted items from heap tier to disk tier. All the new items are first cached on heap @@ -73,7 +73,7 @@ public class TieredSpilloverCache implements ICache { // In future we want to just read the stats from the individual tiers' statsHolder objects, but this isn't // possible right now because of the way computeIfAbsent is implemented. - private final CacheStatsHolder statsHolder; + private final TieredSpilloverCacheStatsHolder statsHolder; private ToLongBiFunction, V> weigher; private final List dimensionNames; ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); @@ -86,17 +86,6 @@ public class TieredSpilloverCache implements ICache { private final Map, String> tierValueMap; private final List> policies; - // Common values used for tier dimension - - /** The name for the tier dimension. 
*/ - public static final String TIER_DIMENSION_NAME = "tier"; - - /** Dimension value for on-heap cache, like OpenSearchOnHeapCache.*/ - public static final String TIER_DIMENSION_VALUE_ON_HEAP = "on_heap"; - - /** Dimension value for on-disk cache, like EhcacheDiskCache. */ - public static final String TIER_DIMENSION_VALUE_DISK = "disk"; - TieredSpilloverCache(Builder builder) { Objects.requireNonNull(builder.onHeapCacheFactory, "onHeap cache builder can't be null"); Objects.requireNonNull(builder.diskCacheFactory, "disk cache builder can't be null"); @@ -148,7 +137,7 @@ public class TieredSpilloverCache implements ICache { diskCache, TIER_DIMENSION_VALUE_DISK ); // Pass "tier" as the innermost dimension name, in addition to whatever dimensions are specified for the cache as a whole - this.statsHolder = new DefaultCacheStatsHolder(getDimensionsWithTierValue(dimensionNames, TIER_DIMENSION_NAME)); + this.statsHolder = new TieredSpilloverCacheStatsHolder(dimensionNames); this.policies = builder.policies; // Will never be null; builder initializes it to an empty list builder.cacheConfig.getClusterSettings() .addSettingsUpdateConsumer(DISK_CACHE_ENABLED_SETTING_MAP.get(builder.cacheType), this::enableDisableDiskCache); @@ -214,7 +203,7 @@ public void invalidate(ICacheKey key) { //for (Tuple, String> pair : cacheAndTierValueList) { for (Map.Entry, String> cacheEntry : tierValueMap.entrySet()) { if (key.getDropStatsForDimensions()) { - List dimensionValues = getDimensionsWithTierValue(key.dimensions, cacheEntry.getValue()); + List dimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, cacheEntry.getValue()); statsHolder.removeDimensions(dimensionValues); } if (key.key != null) { @@ -286,7 +275,7 @@ private Function, V> getValueFromTieredCache() { V value = cacheEntry.getKey().get(key); // Get the tier value corresponding to this cache String tierValue = tierValueMap.get(cacheEntry.getKey()); - List dimensionValues = getDimensionsWithTierValue(key.dimensions, tierValue); + List dimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, tierValue); if (value != null) { statsHolder.incrementHits(dimensionValues); return value; @@ -325,7 +314,7 @@ void handleRemovalFromDiskTier(RemovalNotification, V> notification } void updateStatsOnRemoval(String removedFromTierValue, boolean wasEvicted, ICacheKey key, V value) { - List dimensionValues = getDimensionsWithTierValue(key.dimensions, removedFromTierValue); + List dimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, removedFromTierValue); if (wasEvicted) { statsHolder.incrementEvictions(dimensionValues); } @@ -334,20 +323,11 @@ void updateStatsOnRemoval(String removedFromTierValue, boolean wasEvicted, ICach } void updateStatsOnPut(String destinationTierValue, ICacheKey key, V value) { - List dimensionValues = getDimensionsWithTierValue(key.dimensions, destinationTierValue); + List dimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, destinationTierValue); statsHolder.incrementEntries(dimensionValues); statsHolder.incrementSizeInBytes(dimensionValues, weigher.applyAsLong(key, value)); } - /** - * Add tierValue to the end of a copy of the initial dimension values. 
- */ - private List getDimensionsWithTierValue(List initialDimensions, String tierValue) { - List result = new ArrayList<>(initialDimensions); - result.add(tierValue); - return result; - } - boolean evaluatePolicies(V value) { for (Predicate policy : policies) { if (!policy.test(value)) { diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java new file mode 100644 index 0000000000000..70399b0717ebf --- /dev/null +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java @@ -0,0 +1,145 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.cache.stats.DefaultCacheStatsHolder; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.BiConsumer; + +/** + * A tier-aware version of DefaultCacheStatsHolder. Overrides the incrementer functions, as we cannot just add the on-heap + * and disk stats to get a total for the cache as a whole. For example, if the heap tier has 5 misses and the disk tier + * has 4, the total cache has had 4 misses, not 9. The same goes for evictions. Other stats values add normally. + * This means for misses and evictions, if we are incrementing for the on-heap tier, we have to increment only the leaf nodes + * corresponding to the on-heap tier itself, and not its ancestors. + */ +public class TieredSpilloverCacheStatsHolder extends DefaultCacheStatsHolder { + + // Common values used for tier dimension + + /** The name for the tier dimension. */ + public static final String TIER_DIMENSION_NAME = "tier"; + + /** Dimension value for on-heap cache, like OpenSearchOnHeapCache.*/ + public static final String TIER_DIMENSION_VALUE_ON_HEAP = "on_heap"; + + /** Dimension value for on-disk cache, like EhcacheDiskCache. */ + public static final String TIER_DIMENSION_VALUE_DISK = "disk"; + + public TieredSpilloverCacheStatsHolder(List originalDimensionNames) { + super(getDimensionNamesWithTier(originalDimensionNames)); + } + + private static List getDimensionNamesWithTier(List dimensionNames) { + List dimensionNamesWithTier = new ArrayList<>(dimensionNames); + dimensionNamesWithTier.add(TIER_DIMENSION_NAME); + return dimensionNamesWithTier; + } + + /** + * Add tierValue to the end of a copy of the initial dimension values, so they can appropriately be used in this stats holder. + */ + List getDimensionsWithTierValue(List initialDimensions, String tierValue) { + List result = new ArrayList<>(initialDimensions); + result.add(tierValue); + return result; + } + + private String getAndCheckTierDimensionValue(List dimensionValues) { + String tierDimensionValue = dimensionValues.get(dimensionValues.size() - 1); + assert tierDimensionValue.equals(TIER_DIMENSION_VALUE_ON_HEAP) || tierDimensionValue.equals(TIER_DIMENSION_VALUE_DISK) + : "Invalid tier dimension value"; + return tierDimensionValue; + } + + private boolean isLeafNode(int depth) { + return depth == dimensionNames.size(); // Not size - 1, because there is also a root node + } + + @Override + public void incrementHits(List dimensionValues) { + getAndCheckTierDimensionValue(dimensionValues); + // Hits from either tier should be included in the total values. 
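        // (Illustration: with dimension names ["index", "tier"], a heap hit for "index1" increments the leaf
        // ["index1", "on_heap"] and every ancestor node, so the aggregate hit count is heapHits + diskHits.
        // Misses and evictions below are handled differently, since an on-heap miss only means the lookup
        // falls through to the disk tier.)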
+ internalIncrement(dimensionValues, (node, depth) -> node.incrementHits(), true); + } + + @Override + public void incrementMisses(List dimensionValues) { + final String tierValue = getAndCheckTierDimensionValue(dimensionValues); + + // Only misses from the disk tier should be included in total values. + BiConsumer missIncrementer = (node, depth) -> { + if (tierValue.equals(TIER_DIMENSION_VALUE_ON_HEAP)) { + // If on-heap tier, increment only the leaf node corresponding to the on heap values; not the total values in its parent + // nodes + if (isLeafNode(depth)) { + node.incrementMisses(); + } + } else { + // If disk tier, increment the leaf node and its parents + node.incrementMisses(); + } + }; + + internalIncrement(dimensionValues, missIncrementer, true); + } + + @Override + public void incrementEvictions(List dimensionValues) { + final String tierValue = getAndCheckTierDimensionValue(dimensionValues); + + // Only evictions from the disk tier should be included in total values. + BiConsumer evictionsIncrementer = (node, depth) -> { + if (tierValue.equals(TIER_DIMENSION_VALUE_ON_HEAP)) { + // If on-heap tier, increment only the leaf node corresponding to the on heap values; not the total values in its parent + // nodes + if (isLeafNode(depth)) { + node.incrementEvictions(); + } + } else { + // If disk tier, increment the leaf node and its parents + node.incrementEvictions(); + } + }; + + internalIncrement(dimensionValues, evictionsIncrementer, true); + } + + @Override + public void incrementSizeInBytes(List dimensionValues, long amountBytes) { + getAndCheckTierDimensionValue(dimensionValues); + // Size from either tier should be included in the total values. + internalIncrement(dimensionValues, (node, depth) -> node.incrementSizeInBytes(amountBytes), true); + } + + // For decrements, we should not create nodes if they are absent. This protects us from erroneously decrementing values for keys + // which have been entirely deleted, for example in an async removal listener. + @Override + public void decrementSizeInBytes(List dimensionValues, long amountBytes) { + getAndCheckTierDimensionValue(dimensionValues); + // Size from either tier should be included in the total values. + internalIncrement(dimensionValues, (node, depth) -> node.decrementSizeInBytes(amountBytes), false); + } + + @Override + public void incrementEntries(List dimensionValues) { + getAndCheckTierDimensionValue(dimensionValues); + // Entries from either tier should be included in the total values. + internalIncrement(dimensionValues, (node, depth) -> node.incrementEntries(), true); + } + + @Override + public void decrementEntries(List dimensionValues) { + getAndCheckTierDimensionValue(dimensionValues); + // Entries from either tier should be included in the total values. 
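        // (Illustration: an entry lives in exactly one tier at a time; when a value spills over, the heap tier's
        // leaf is decremented via updateStatsOnRemoval while updateStatsOnPut increments the disk leaf, so
        // summing the tiers does not double-count entries or bytes.)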
+ internalIncrement(dimensionValues, (node, depth) -> node.decrementEntries(), false); + } +} diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index 5e08ca5f3a11f..ef74c633311f7 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -8,6 +8,7 @@ package org.opensearch.cache.common.tier; +import org.opensearch.common.Randomness; import org.opensearch.common.cache.CacheType; import org.opensearch.common.cache.ICache; import org.opensearch.common.cache.ICacheKey; @@ -36,6 +37,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.UUID; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -45,10 +47,10 @@ import java.util.function.Function; import java.util.function.Predicate; -import static org.opensearch.cache.common.tier.TieredSpilloverCache.TIER_DIMENSION_VALUE_DISK; -import static org.opensearch.cache.common.tier.TieredSpilloverCache.TIER_DIMENSION_VALUE_ON_HEAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP; import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY; public class TieredSpilloverCacheTests extends OpenSearchTestCase { @@ -1183,39 +1185,100 @@ public void testGetPutAndInvalidateWithDiskCacheDisabled() throws Exception { assertEquals(0, tieredSpilloverCache.count()); } - public void testTiersDoNotTrackStats() throws Exception { - int onHeapCacheSize = randomIntBetween(10, 30); - int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); - int keyValueSize = 50; - MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); - TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache( - keyValueSize, - diskCacheSize, - removalListener, - Settings.builder() - .put( - OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) - .get(MAXIMUM_SIZE_IN_BYTES_KEY) - .getKey(), - onHeapCacheSize * keyValueSize + "b" - ) - .build(), - 0 - ); - - // do some gets to put entries in both tiers - int numMisses = onHeapCacheSize + randomIntBetween(10, 20); - for (int iter = 0; iter < numMisses; iter++) { - ICacheKey key = getICacheKey(UUID.randomUUID().toString()); - LoadAwareCacheLoader, String> tieredCacheLoader = getLoadAwareCacheLoader(); - tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); - } - assertNotEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), tieredSpilloverCache.stats().getTotalStats()); - assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), tieredSpilloverCache.getOnHeapCache().stats().getTotalStats()); - ImmutableCacheStats diskStats = tieredSpilloverCache.getDiskCache().stats().getTotalStats(); - assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), diskStats); + public void testTiersDoNotTrackStats() throws Exception { + int 
onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int keyValueSize = 50; + MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache( + keyValueSize, + diskCacheSize, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), + 0 + ); + + // do some gets to put entries in both tiers + int numMisses = onHeapCacheSize + randomIntBetween(10, 20); + for (int iter = 0; iter < numMisses; iter++) { + ICacheKey key = getICacheKey(UUID.randomUUID().toString()); + LoadAwareCacheLoader, String> tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); + } + assertNotEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), tieredSpilloverCache.stats().getTotalStats()); + assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), tieredSpilloverCache.getOnHeapCache().stats().getTotalStats()); + ImmutableCacheStats diskStats = tieredSpilloverCache.getDiskCache().stats().getTotalStats(); + assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), diskStats); + } + + public void testTierStatsAddCorrectly() throws Exception { + /* We expect the total stats to be: + * totalHits = heapHits + diskHits + * totalMisses = diskMisses + * totalEvictions = diskEvictions + * totalSize = heapSize + diskSize + * totalEntries = heapEntries + diskEntries + */ + + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int keyValueSize = 50; + MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache( + keyValueSize, + diskCacheSize, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), + 0 + ); + + List> usedKeys = new ArrayList<>(); + // Fill the cache, getting some entries + evictions for both tiers + int numMisses = onHeapCacheSize + diskCacheSize + randomIntBetween(10, 20); + for (int iter = 0; iter < numMisses; iter++) { + ICacheKey key = getICacheKey(UUID.randomUUID().toString()); + usedKeys.add(key); + LoadAwareCacheLoader, String> tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); + } + // Also do some random hits + Random rand = Randomness.get(); + int approxNumHits = 30; + for (int i = 0; i < approxNumHits; i++) { + LoadAwareCacheLoader, String> tieredCacheLoader = getLoadAwareCacheLoader(); + ICacheKey key = usedKeys.get(rand.nextInt(usedKeys.size())); + tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); } + ImmutableCacheStats totalStats = tieredSpilloverCache.stats().getTotalStats(); + ImmutableCacheStats heapStats = getStatsSnapshotForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP); + ImmutableCacheStats diskStats = getStatsSnapshotForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK); + + assertEquals(totalStats.getHits(), heapStats.getHits() + diskStats.getHits()); + assertEquals(totalStats.getMisses(), diskStats.getMisses()); + assertEquals(totalStats.getEvictions(), diskStats.getEvictions()); + 
assertEquals(totalStats.getSizeInBytes(), heapStats.getSizeInBytes() + diskStats.getSizeInBytes()); + assertEquals(totalStats.getEntries(), heapStats.getEntries() + diskStats.getEntries()); + + // Also check the heap stats don't have zero misses or evictions + assertNotEquals(0, heapStats.getMisses()); + assertNotEquals(0, heapStats.getEvictions()); + } + private List getMockDimensions() { List dims = new ArrayList<>(); for (String dimensionName : dimensionNames) { diff --git a/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java index ad943e0b2ed1a..9662c64e448fb 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java @@ -16,7 +16,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Consumer; +import java.util.function.BiConsumer; /** * A class ICache implementations use to internally keep track of their stats across multiple dimensions. @@ -32,7 +32,7 @@ public class DefaultCacheStatsHolder implements CacheStatsHolder { // The list of permitted dimensions. Should be ordered from "outermost" to "innermost", as you would like to // aggregate them in an API response. - private final List dimensionNames; + protected final List dimensionNames; // A tree structure based on dimension values, which stores stats values in its leaf nodes. // Non-leaf nodes have stats matching the sum of their children. // We use a tree structure, rather than a map with concatenated keys, to save on memory usage. If there are many leaf @@ -55,39 +55,39 @@ public List getDimensionNames() { // The order has to match the order given in dimensionNames. @Override public void incrementHits(List dimensionValues) { - internalIncrement(dimensionValues, Node::incrementHits, true); + internalIncrement(dimensionValues, (node, depth) -> node.incrementHits(), true); } @Override public void incrementMisses(List dimensionValues) { - internalIncrement(dimensionValues, Node::incrementMisses, true); + internalIncrement(dimensionValues, (node, depth) -> node.incrementMisses(), true); } @Override public void incrementEvictions(List dimensionValues) { - internalIncrement(dimensionValues, Node::incrementEvictions, true); + internalIncrement(dimensionValues, (node, depth) -> node.incrementEvictions(), true); } @Override public void incrementSizeInBytes(List dimensionValues, long amountBytes) { - internalIncrement(dimensionValues, (node) -> node.incrementSizeInBytes(amountBytes), true); + internalIncrement(dimensionValues, (node, depth) -> node.incrementSizeInBytes(amountBytes), true); } // For decrements, we should not create nodes if they are absent. This protects us from erroneously decrementing values for keys // which have been entirely deleted, for example in an async removal listener. 
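    // (Illustration: if an async removal listener runs after removeDimensions() has already dropped a subtree,
    // a late decrementEntries(List.of("index1")) must be a no-op; passing createNodesIfAbsent=false prevents the
    // subtree from being recreated with negative counts.)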
@Override public void decrementSizeInBytes(List dimensionValues, long amountBytes) { - internalIncrement(dimensionValues, (node) -> node.decrementSizeInBytes(amountBytes), false); + internalIncrement(dimensionValues, (node, depth) -> node.decrementSizeInBytes(amountBytes), false); } @Override public void incrementEntries(List dimensionValues) { - internalIncrement(dimensionValues, Node::incrementEntries, true); + internalIncrement(dimensionValues, (node, depth) -> node.incrementEntries(), true); } @Override public void decrementEntries(List dimensionValues) { - internalIncrement(dimensionValues, Node::decrementEntries, false); + internalIncrement(dimensionValues, (node, depth) -> node.decrementEntries(), false); } /** @@ -112,7 +112,7 @@ public long count() { return statsRoot.getEntries(); } - private void internalIncrement(List dimensionValues, Consumer adder, boolean createNodesIfAbsent) { + protected void internalIncrement(List dimensionValues, BiConsumer adder, boolean createNodesIfAbsent) { assert dimensionValues.size() == dimensionNames.size(); // First try to increment without creating nodes boolean didIncrement = internalIncrementHelper(dimensionValues, statsRoot, 0, adder, false); @@ -136,12 +136,12 @@ private boolean internalIncrementHelper( List dimensionValues, Node node, int depth, // Pass in the depth to avoid having to slice the list for each node. - Consumer adder, + BiConsumer adder, boolean createNodesIfAbsent ) { if (depth == dimensionValues.size()) { // This is the leaf node we are trying to reach - adder.accept(node); + adder.accept(node, depth); return true; } @@ -156,7 +156,7 @@ private boolean internalIncrementHelper( } if (internalIncrementHelper(dimensionValues, child, depth + 1, adder, createNodesIfAbsent)) { // Function returns true if the next node down was incremented - adder.accept(node); + adder.accept(node, depth); return true; } return false; @@ -208,7 +208,7 @@ Node getStatsRoot() { return statsRoot; } - static class Node { + protected static class Node { private final String dimensionValue; // Map from dimensionValue to the DimensionNode for that dimension value. 
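        // (Illustration: with dimensionNames ["index", "tier"], the root's children are keyed by index values such
        // as "index1", and each index node's children are keyed by tier values; non-leaf nodes hold the sum of
        // their children, as described in the class comment above.)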
final Map children; @@ -240,31 +240,31 @@ protected Map getChildren() { // Functions for modifying internal CacheStatsCounter without callers having to be aware of CacheStatsCounter - void incrementHits() { + public void incrementHits() { this.stats.incrementHits(); } - void incrementMisses() { + public void incrementMisses() { this.stats.incrementMisses(); } - void incrementEvictions() { + public void incrementEvictions() { this.stats.incrementEvictions(); } - void incrementSizeInBytes(long amountBytes) { + public void incrementSizeInBytes(long amountBytes) { this.stats.incrementSizeInBytes(amountBytes); } - void decrementSizeInBytes(long amountBytes) { + public void decrementSizeInBytes(long amountBytes) { this.stats.decrementSizeInBytes(amountBytes); } - void incrementEntries() { + public void incrementEntries() { this.stats.incrementEntries(); } - void decrementEntries() { + public void decrementEntries() { this.stats.decrementEntries(); } From 5ac02105cc7de4c4b31377e27878f936295721d7 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Sun, 28 Apr 2024 15:05:57 -0700 Subject: [PATCH 10/20] spotlessApply Signed-off-by: Peter Alfonsi --- .../cache/common/tier/TieredSpilloverCache.java | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index bfd8e5416fb0d..625116fbf1041 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -20,7 +20,6 @@ import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; import org.opensearch.common.cache.store.config.CacheConfig; -import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -132,10 +131,7 @@ public class TieredSpilloverCache implements ICache { this.caches = Collections.synchronizedMap(cacheListMap); this.dimensionNames = builder.cacheConfig.getDimensionNames(); - this.tierValueMap = Map.of( - onHeapCache, TIER_DIMENSION_VALUE_ON_HEAP, - diskCache, TIER_DIMENSION_VALUE_DISK - ); + this.tierValueMap = Map.of(onHeapCache, TIER_DIMENSION_VALUE_ON_HEAP, diskCache, TIER_DIMENSION_VALUE_DISK); // Pass "tier" as the innermost dimension name, in addition to whatever dimensions are specified for the cache as a whole this.statsHolder = new TieredSpilloverCacheStatsHolder(dimensionNames); this.policies = builder.policies; // Will never be null; builder initializes it to an empty list @@ -200,7 +196,7 @@ public void invalidate(ICacheKey key) { // also trigger a hit/miss listener event, so ignoring it for now. // We don't update stats here, as this is handled by the removal listeners for the tiers. 
try (ReleasableLock ignore = writeLock.acquire()) { - //for (Tuple, String> pair : cacheAndTierValueList) { + // for (Tuple, String> pair : cacheAndTierValueList) { for (Map.Entry, String> cacheEntry : tierValueMap.entrySet()) { if (key.getDropStatsForDimensions()) { List dimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, cacheEntry.getValue()); @@ -292,7 +288,9 @@ private Function, V> getValueFromTieredCache() { void handleRemovalFromHeapTier(RemovalNotification, V> notification) { ICacheKey key = notification.getKey(); boolean wasEvicted = false; - if (caches.get(diskCache) && SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()) && evaluatePolicies(notification.getValue())) { + if (caches.get(diskCache) + && SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()) + && evaluatePolicies(notification.getValue())) { try (ReleasableLock ignore = writeLock.acquire()) { diskCache.put(key, notification.getValue()); // spill over to the disk tier and increment its stats updateStatsOnPut(TIER_DIMENSION_VALUE_DISK, key, notification.getValue()); From 7c717456953ccfd4ae93bd2578eba31a2fc3a993 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 29 Apr 2024 09:42:08 -0700 Subject: [PATCH 11/20] Updated logic to handle dynamic disk cache Signed-off-by: Peter Alfonsi --- .../common/tier/TieredSpilloverCache.java | 14 ++++------ .../tier/TieredSpilloverCacheStatsHolder.java | 22 ++++++++++----- .../tier/TieredSpilloverCacheTests.java | 28 +++++++++++++++++++ 3 files changed, 49 insertions(+), 15 deletions(-) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 625116fbf1041..aeedcffab2b35 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -133,7 +133,7 @@ public class TieredSpilloverCache implements ICache { this.dimensionNames = builder.cacheConfig.getDimensionNames(); this.tierValueMap = Map.of(onHeapCache, TIER_DIMENSION_VALUE_ON_HEAP, diskCache, TIER_DIMENSION_VALUE_DISK); // Pass "tier" as the innermost dimension name, in addition to whatever dimensions are specified for the cache as a whole - this.statsHolder = new TieredSpilloverCacheStatsHolder(dimensionNames); + this.statsHolder = new TieredSpilloverCacheStatsHolder(dimensionNames, isDiskCacheEnabled); this.policies = builder.policies; // Will never be null; builder initializes it to an empty list builder.cacheConfig.getClusterSettings() .addSettingsUpdateConsumer(DISK_CACHE_ENABLED_SETTING_MAP.get(builder.cacheType), this::enableDisableDiskCache); @@ -154,6 +154,7 @@ void enableDisableDiskCache(Boolean isDiskCacheEnabled) { // When disk cache is disabled, we are not clearing up the disk cache entries yet as that should be part of // separate cache/clear API. 
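        // (Illustration: this method is invoked through the dynamic cluster setting registered in the constructor,
        // for example
        //   clusterSettings.applySettings(
        //       Settings.builder().put(DISK_CACHE_ENABLED_SETTING_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), false).build()
        //   );
        // as exercised by the test changes later in this patch.)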
this.caches.put(diskCache, isDiskCacheEnabled); + this.statsHolder.setDiskCacheEnabled(isDiskCacheEnabled); } @Override @@ -287,18 +288,15 @@ private Function, V> getValueFromTieredCache() { void handleRemovalFromHeapTier(RemovalNotification, V> notification) { ICacheKey key = notification.getKey(); - boolean wasEvicted = false; - if (caches.get(diskCache) - && SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()) - && evaluatePolicies(notification.getValue())) { + boolean wasEvicted = SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()); + if (caches.get(diskCache) && wasEvicted && evaluatePolicies(notification.getValue())) { try (ReleasableLock ignore = writeLock.acquire()) { diskCache.put(key, notification.getValue()); // spill over to the disk tier and increment its stats updateStatsOnPut(TIER_DIMENSION_VALUE_DISK, key, notification.getValue()); } - wasEvicted = true; } else { - // If the removal was for another reason, send this notification to the TSC's removal listener, as the value is leaving the TSC - // entirely + // If the value is not going to the disk cache, send this notification to the TSC's removal listener + // as the value is leaving the TSC entirely removalListener.onRemoval(notification); } updateStatsOnRemoval(TIER_DIMENSION_VALUE_ON_HEAP, wasEvicted, key, notification.getValue()); diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java index 70399b0717ebf..d5fadb7ab8108 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java @@ -23,6 +23,9 @@ */ public class TieredSpilloverCacheStatsHolder extends DefaultCacheStatsHolder { + /** Whether the disk cache is currently enabled. */ + private boolean diskCacheEnabled; + // Common values used for tier dimension /** The name for the tier dimension. */ @@ -34,8 +37,9 @@ public class TieredSpilloverCacheStatsHolder extends DefaultCacheStatsHolder { /** Dimension value for on-disk cache, like EhcacheDiskCache. */ public static final String TIER_DIMENSION_VALUE_DISK = "disk"; - public TieredSpilloverCacheStatsHolder(List originalDimensionNames) { + public TieredSpilloverCacheStatsHolder(List originalDimensionNames, boolean diskCacheEnabled) { super(getDimensionNamesWithTier(originalDimensionNames)); + this.diskCacheEnabled = diskCacheEnabled; } private static List getDimensionNamesWithTier(List dimensionNames) { @@ -75,16 +79,16 @@ public void incrementHits(List dimensionValues) { public void incrementMisses(List dimensionValues) { final String tierValue = getAndCheckTierDimensionValue(dimensionValues); - // Only misses from the disk tier should be included in total values. + // If the disk tier is present, only misses from the disk tier should be included in total values. 
BiConsumer missIncrementer = (node, depth) -> { - if (tierValue.equals(TIER_DIMENSION_VALUE_ON_HEAP)) { + if (tierValue.equals(TIER_DIMENSION_VALUE_ON_HEAP) && diskCacheEnabled) { // If on-heap tier, increment only the leaf node corresponding to the on heap values; not the total values in its parent // nodes if (isLeafNode(depth)) { node.incrementMisses(); } } else { - // If disk tier, increment the leaf node and its parents + // If disk tier, or on-heap tier with a disabled disk tier, increment the leaf node and its parents node.incrementMisses(); } }; @@ -96,16 +100,16 @@ public void incrementMisses(List dimensionValues) { public void incrementEvictions(List dimensionValues) { final String tierValue = getAndCheckTierDimensionValue(dimensionValues); - // Only evictions from the disk tier should be included in total values. + // If the disk tier is present, only evictions from the disk tier should be included in total values. BiConsumer evictionsIncrementer = (node, depth) -> { - if (tierValue.equals(TIER_DIMENSION_VALUE_ON_HEAP)) { + if (tierValue.equals(TIER_DIMENSION_VALUE_ON_HEAP) && diskCacheEnabled) { // If on-heap tier, increment only the leaf node corresponding to the on heap values; not the total values in its parent // nodes if (isLeafNode(depth)) { node.incrementEvictions(); } } else { - // If disk tier, increment the leaf node and its parents + // If disk tier, or on-heap tier with a disabled disk tier, increment the leaf node and its parents node.incrementEvictions(); } }; @@ -142,4 +146,8 @@ public void decrementEntries(List dimensionValues) { // Entries from either tier should be included in the total values. internalIncrement(dimensionValues, (node, depth) -> node.decrementEntries(), false); } + + void setDiskCacheEnabled(boolean diskCacheEnabled) { + this.diskCacheEnabled = diskCacheEnabled; + } } diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index ef74c633311f7..735969c5bb0e1 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -1277,6 +1277,34 @@ public void testTierStatsAddCorrectly() throws Exception { // Also check the heap stats don't have zero misses or evictions assertNotEquals(0, heapStats.getMisses()); assertNotEquals(0, heapStats.getEvictions()); + + // Now turn off the disk tier and do more misses and evictions from the heap tier. 
+ // These should be added to the totals, as the disk tier is now absent + long missesBeforeDisablingDiskCache = totalStats.getMisses(); + long evictionsBeforeDisablingDiskCache = totalStats.getEvictions(); + long heapTierEvictionsBeforeDisablingDiskCache = heapStats.getEvictions(); + + clusterSettings.applySettings( + Settings.builder().put(DISK_CACHE_ENABLED_SETTING_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), false).build() + ); + + int newMisses = randomIntBetween(10, 30); + for (int i = 0; i < newMisses; i++) { + LoadAwareCacheLoader, String> tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(getICacheKey(UUID.randomUUID().toString()), tieredCacheLoader); + } + + totalStats = tieredSpilloverCache.stats().getTotalStats(); + heapStats = getStatsSnapshotForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP); + assertEquals(missesBeforeDisablingDiskCache + newMisses, totalStats.getMisses()); + assertEquals(heapTierEvictionsBeforeDisablingDiskCache + newMisses, heapStats.getEvictions()); + assertEquals(evictionsBeforeDisablingDiskCache + newMisses, totalStats.getEvictions()); + + // Turn the disk cache back on in cluster settings for other tests + clusterSettings.applySettings( + Settings.builder().put(DISK_CACHE_ENABLED_SETTING_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), true).build() + ); + } private List getMockDimensions() { From 29c96161a152e73f7724b35533dc8962d383eb1d Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 29 Apr 2024 10:23:52 -0700 Subject: [PATCH 12/20] Javadoc Signed-off-by: Peter Alfonsi --- .../cache/common/tier/TieredSpilloverCacheStatsHolder.java | 5 +++++ .../common/cache/stats/DefaultCacheStatsHolder.java | 3 +++ 2 files changed, 8 insertions(+) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java index d5fadb7ab8108..b98f10eb9ca94 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java @@ -37,6 +37,11 @@ public class TieredSpilloverCacheStatsHolder extends DefaultCacheStatsHolder { /** Dimension value for on-disk cache, like EhcacheDiskCache. */ public static final String TIER_DIMENSION_VALUE_DISK = "disk"; + /** + * Constructor for the stats holder. + * @param originalDimensionNames the original dimension names, not including TIER_DIMENSION_NAME + * @param diskCacheEnabled whether the disk tier starts out enabled + */ public TieredSpilloverCacheStatsHolder(List originalDimensionNames, boolean diskCacheEnabled) { super(getDimensionNamesWithTier(originalDimensionNames)); this.diskCacheEnabled = diskCacheEnabled; diff --git a/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java index 9662c64e448fb..2b3107484c8de 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java @@ -208,6 +208,9 @@ Node getStatsRoot() { return statsRoot; } + /** + * Nodes that make up the tree in the stats holder. 
+ */ protected static class Node { private final String dimensionValue; // Map from dimensionValue to the DimensionNode for that dimension value. From c14a159e3c06f10f302551b40c515f142d49ce6b Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 29 Apr 2024 12:28:13 -0700 Subject: [PATCH 13/20] Addressed Ankit's comments Signed-off-by: Peter Alfonsi --- .../tier/TieredSpilloverCacheStatsHolder.java | 47 +++++++++---------- .../cache/stats/DefaultCacheStatsHolder.java | 35 +++++++++----- 2 files changed, 44 insertions(+), 38 deletions(-) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java index b98f10eb9ca94..7f76c8881d674 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java @@ -12,14 +12,15 @@ import java.util.ArrayList; import java.util.List; -import java.util.function.BiConsumer; +import java.util.function.Consumer; /** * A tier-aware version of DefaultCacheStatsHolder. Overrides the incrementer functions, as we cannot just add the on-heap * and disk stats to get a total for the cache as a whole. For example, if the heap tier has 5 misses and the disk tier * has 4, the total cache has had 4 misses, not 9. The same goes for evictions. Other stats values add normally. - * This means for misses and evictions, if we are incrementing for the on-heap tier, we have to increment only the leaf nodes - * corresponding to the on-heap tier itself, and not its ancestors. + * This means for misses and evictions, if we are incrementing for the on-heap tier and the disk tier is present, + * we have to increment only the leaf nodes corresponding to the on-heap tier itself, and not its ancestors. + * If the disk tier is not present, we do increment the ancestor nodes. */ public class TieredSpilloverCacheStatsHolder extends DefaultCacheStatsHolder { @@ -62,34 +63,30 @@ List getDimensionsWithTierValue(List initialDimensions, String t return result; } - private String getAndCheckTierDimensionValue(List dimensionValues) { + private String validateTierDimensionValue(List dimensionValues) { String tierDimensionValue = dimensionValues.get(dimensionValues.size() - 1); assert tierDimensionValue.equals(TIER_DIMENSION_VALUE_ON_HEAP) || tierDimensionValue.equals(TIER_DIMENSION_VALUE_DISK) : "Invalid tier dimension value"; return tierDimensionValue; } - private boolean isLeafNode(int depth) { - return depth == dimensionNames.size(); // Not size - 1, because there is also a root node - } - @Override public void incrementHits(List dimensionValues) { - getAndCheckTierDimensionValue(dimensionValues); + validateTierDimensionValue(dimensionValues); // Hits from either tier should be included in the total values. - internalIncrement(dimensionValues, (node, depth) -> node.incrementHits(), true); + super.incrementHits(dimensionValues); } @Override public void incrementMisses(List dimensionValues) { - final String tierValue = getAndCheckTierDimensionValue(dimensionValues); + final String tierValue = validateTierDimensionValue(dimensionValues); // If the disk tier is present, only misses from the disk tier should be included in total values. 
- BiConsumer missIncrementer = (node, depth) -> { + Consumer missIncrementer = (node) -> { if (tierValue.equals(TIER_DIMENSION_VALUE_ON_HEAP) && diskCacheEnabled) { // If on-heap tier, increment only the leaf node corresponding to the on heap values; not the total values in its parent // nodes - if (isLeafNode(depth)) { + if (node.isAtLowestLevel()) { node.incrementMisses(); } } else { @@ -97,20 +94,19 @@ public void incrementMisses(List dimensionValues) { node.incrementMisses(); } }; - internalIncrement(dimensionValues, missIncrementer, true); } @Override public void incrementEvictions(List dimensionValues) { - final String tierValue = getAndCheckTierDimensionValue(dimensionValues); + final String tierValue = validateTierDimensionValue(dimensionValues); // If the disk tier is present, only evictions from the disk tier should be included in total values. - BiConsumer evictionsIncrementer = (node, depth) -> { + Consumer evictionsIncrementer = (node) -> { if (tierValue.equals(TIER_DIMENSION_VALUE_ON_HEAP) && diskCacheEnabled) { // If on-heap tier, increment only the leaf node corresponding to the on heap values; not the total values in its parent // nodes - if (isLeafNode(depth)) { + if (node.isAtLowestLevel()) { node.incrementEvictions(); } } else { @@ -118,38 +114,37 @@ public void incrementEvictions(List dimensionValues) { node.incrementEvictions(); } }; - internalIncrement(dimensionValues, evictionsIncrementer, true); } @Override public void incrementSizeInBytes(List dimensionValues, long amountBytes) { - getAndCheckTierDimensionValue(dimensionValues); + validateTierDimensionValue(dimensionValues); // Size from either tier should be included in the total values. - internalIncrement(dimensionValues, (node, depth) -> node.incrementSizeInBytes(amountBytes), true); + super.incrementSizeInBytes(dimensionValues, amountBytes); } // For decrements, we should not create nodes if they are absent. This protects us from erroneously decrementing values for keys // which have been entirely deleted, for example in an async removal listener. @Override public void decrementSizeInBytes(List dimensionValues, long amountBytes) { - getAndCheckTierDimensionValue(dimensionValues); + validateTierDimensionValue(dimensionValues); // Size from either tier should be included in the total values. - internalIncrement(dimensionValues, (node, depth) -> node.decrementSizeInBytes(amountBytes), false); + super.decrementSizeInBytes(dimensionValues, amountBytes); } @Override public void incrementEntries(List dimensionValues) { - getAndCheckTierDimensionValue(dimensionValues); + validateTierDimensionValue(dimensionValues); // Entries from either tier should be included in the total values. - internalIncrement(dimensionValues, (node, depth) -> node.incrementEntries(), true); + super.incrementEntries(dimensionValues); } @Override public void decrementEntries(List dimensionValues) { - getAndCheckTierDimensionValue(dimensionValues); + validateTierDimensionValue(dimensionValues); // Entries from either tier should be included in the total values. 
- internalIncrement(dimensionValues, (node, depth) -> node.decrementEntries(), false); + super.decrementEntries(dimensionValues); } void setDiskCacheEnabled(boolean diskCacheEnabled) { diff --git a/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java index 2b3107484c8de..f6cf72e36e44f 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java @@ -16,7 +16,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import java.util.function.BiConsumer; +import java.util.function.Consumer; /** * A class ICache implementations use to internally keep track of their stats across multiple dimensions. @@ -55,39 +55,39 @@ public List getDimensionNames() { // The order has to match the order given in dimensionNames. @Override public void incrementHits(List dimensionValues) { - internalIncrement(dimensionValues, (node, depth) -> node.incrementHits(), true); + internalIncrement(dimensionValues, Node::incrementHits, true); } @Override public void incrementMisses(List dimensionValues) { - internalIncrement(dimensionValues, (node, depth) -> node.incrementMisses(), true); + internalIncrement(dimensionValues, Node::incrementMisses, true); } @Override public void incrementEvictions(List dimensionValues) { - internalIncrement(dimensionValues, (node, depth) -> node.incrementEvictions(), true); + internalIncrement(dimensionValues, Node::incrementEvictions, true); } @Override public void incrementSizeInBytes(List dimensionValues, long amountBytes) { - internalIncrement(dimensionValues, (node, depth) -> node.incrementSizeInBytes(amountBytes), true); + internalIncrement(dimensionValues, (node) -> node.incrementSizeInBytes(amountBytes), true); } // For decrements, we should not create nodes if they are absent. This protects us from erroneously decrementing values for keys // which have been entirely deleted, for example in an async removal listener. 
@Override public void decrementSizeInBytes(List dimensionValues, long amountBytes) { - internalIncrement(dimensionValues, (node, depth) -> node.decrementSizeInBytes(amountBytes), false); + internalIncrement(dimensionValues, (node) -> node.decrementSizeInBytes(amountBytes), false); } @Override public void incrementEntries(List dimensionValues) { - internalIncrement(dimensionValues, (node, depth) -> node.incrementEntries(), true); + internalIncrement(dimensionValues, Node::incrementEntries, true); } @Override public void decrementEntries(List dimensionValues) { - internalIncrement(dimensionValues, (node, depth) -> node.decrementEntries(), false); + internalIncrement(dimensionValues, Node::decrementEntries, false); } /** @@ -112,7 +112,7 @@ public long count() { return statsRoot.getEntries(); } - protected void internalIncrement(List dimensionValues, BiConsumer adder, boolean createNodesIfAbsent) { + protected void internalIncrement(List dimensionValues, Consumer adder, boolean createNodesIfAbsent) { assert dimensionValues.size() == dimensionNames.size(); // First try to increment without creating nodes boolean didIncrement = internalIncrementHelper(dimensionValues, statsRoot, 0, adder, false); @@ -136,12 +136,12 @@ private boolean internalIncrementHelper( List dimensionValues, Node node, int depth, // Pass in the depth to avoid having to slice the list for each node. - BiConsumer adder, + Consumer adder, boolean createNodesIfAbsent ) { if (depth == dimensionValues.size()) { // This is the leaf node we are trying to reach - adder.accept(node, depth); + adder.accept(node); return true; } @@ -156,7 +156,7 @@ private boolean internalIncrementHelper( } if (internalIncrementHelper(dimensionValues, child, depth + 1, adder, createNodesIfAbsent)) { // Function returns true if the next node down was incremented - adder.accept(node, depth); + adder.accept(node); return true; } return false; @@ -305,5 +305,16 @@ ImmutableCacheStatsHolder.Node snapshot() { } return new ImmutableCacheStatsHolder.Node(dimensionValue, snapshotChildren, getImmutableStats()); } + + /** + * Return whether this is a leaf node which is at the lowest level of the tree. + * Does not return true if this is a node at a higher level whose children are still being constructed. 
+ * @return if this is a leaf node at the lowest level + */ + public boolean isAtLowestLevel() { + // Compare by value to the empty children map, to ensure we don't get false positives for nodes + // which are in the process of having children added + return children == EMPTY_CHILDREN_MAP; + } } } From ab3a356580c04282bed84abceaf7ddb127b6f66e Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 29 Apr 2024 13:42:31 -0700 Subject: [PATCH 14/20] Addressed Sagar's comments Signed-off-by: Peter Alfonsi --- .../common/tier/TieredSpilloverCache.java | 105 +++++++++++++----- .../tier/TieredSpilloverCacheTests.java | 6 +- .../cache/store/OpenSearchOnHeapCache.java | 4 +- 3 files changed, 83 insertions(+), 32 deletions(-) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index aeedcffab2b35..4047d0aaefc71 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -20,6 +20,7 @@ import org.opensearch.common.cache.policy.CachedQueryResult; import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -81,8 +82,7 @@ public class TieredSpilloverCache implements ICache { /** * Maintains caching tiers in ascending order of cache latency. */ - private final Map, Boolean> caches; - private final Map, String> tierValueMap; + private final Map, TierInfo> caches; private final List> policies; TieredSpilloverCache(Builder builder) { @@ -125,13 +125,12 @@ public class TieredSpilloverCache implements ICache { builder.cacheFactories ); Boolean isDiskCacheEnabled = DISK_CACHE_ENABLED_SETTING_MAP.get(builder.cacheType).get(builder.cacheConfig.getSettings()); - LinkedHashMap, Boolean> cacheListMap = new LinkedHashMap<>(); - cacheListMap.put(onHeapCache, true); - cacheListMap.put(diskCache, isDiskCacheEnabled); + LinkedHashMap, TierInfo> cacheListMap = new LinkedHashMap<>(); + cacheListMap.put(onHeapCache, new TierInfo(true, TIER_DIMENSION_VALUE_ON_HEAP)); + cacheListMap.put(diskCache, new TierInfo(isDiskCacheEnabled, TIER_DIMENSION_VALUE_DISK)); this.caches = Collections.synchronizedMap(cacheListMap); this.dimensionNames = builder.cacheConfig.getDimensionNames(); - this.tierValueMap = Map.of(onHeapCache, TIER_DIMENSION_VALUE_ON_HEAP, diskCache, TIER_DIMENSION_VALUE_DISK); // Pass "tier" as the innermost dimension name, in addition to whatever dimensions are specified for the cache as a whole this.statsHolder = new TieredSpilloverCacheStatsHolder(dimensionNames, isDiskCacheEnabled); this.policies = builder.policies; // Will never be null; builder initializes it to an empty list @@ -153,13 +152,17 @@ ICache getDiskCache() { void enableDisableDiskCache(Boolean isDiskCacheEnabled) { // When disk cache is disabled, we are not clearing up the disk cache entries yet as that should be part of // separate cache/clear API. 
- this.caches.put(diskCache, isDiskCacheEnabled); + this.caches.put(diskCache, new TierInfo(isDiskCacheEnabled, TIER_DIMENSION_VALUE_DISK)); this.statsHolder.setDiskCacheEnabled(isDiskCacheEnabled); } @Override public V get(ICacheKey key) { - return getValueFromTieredCache().apply(key); + Tuple cacheValueTuple = getValueFromTieredCache(true).apply(key); + if (cacheValueTuple == null) { + return null; + } + return cacheValueTuple.v1(); } @Override @@ -172,22 +175,50 @@ public void put(ICacheKey key, V value) { @Override public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V> loader) throws Exception { - V cacheValue = getValueFromTieredCache().apply(key); - if (cacheValue == null) { + // Don't capture stats in the initial getValueFromTieredCache(). If we have concurrent requests for the same key, + // and it only has to be loaded one time, we should report one miss and the rest hits. But, if we do stats in + // getValueFromTieredCache(), + // we will see all misses. Instead, handle stats in computeIfAbsent(). + Tuple cacheValueTuple = getValueFromTieredCache(false).apply(key); + List heapDimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, TIER_DIMENSION_VALUE_ON_HEAP); + List diskDimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, TIER_DIMENSION_VALUE_DISK); + + if (cacheValueTuple == null) { // Add the value to the onHeap cache. We are calling computeIfAbsent which does another get inside. // This is needed as there can be many requests for the same key at the same time and we only want to load // the value once. V value = null; try (ReleasableLock ignore = writeLock.acquire()) { value = onHeapCache.computeIfAbsent(key, loader); - if (loader.isLoaded()) { - // The value was just computed and added to the cache - updateStatsOnPut(TIER_DIMENSION_VALUE_ON_HEAP, key, value); + } + // Handle stats + if (loader.isLoaded()) { + // The value was just computed and added to the cache by this thread. Register a miss for the heap cache, and the disk cache + // if present + updateStatsOnPut(TIER_DIMENSION_VALUE_ON_HEAP, key, value); + statsHolder.incrementMisses(heapDimensionValues); + if (caches.get(diskCache).isEnabled) { + statsHolder.incrementMisses(diskDimensionValues); } + } else { + // Another thread requesting this key already loaded the value. Register a hit for the heap cache + statsHolder.incrementHits(heapDimensionValues); } return value; } - return cacheValue; + + else { + // Handle stats for an initial hit from getValueFromTieredCache() + if (cacheValueTuple.v2().equals(TIER_DIMENSION_VALUE_ON_HEAP)) { + // A hit for the heap tier + statsHolder.incrementHits(heapDimensionValues); + } else { + // Miss for the heap tier, hit for the disk tier + statsHolder.incrementMisses(heapDimensionValues); + statsHolder.incrementHits(diskDimensionValues); + } + } + return cacheValueTuple.v1(); } @Override @@ -197,10 +228,9 @@ public void invalidate(ICacheKey key) { // also trigger a hit/miss listener event, so ignoring it for now. // We don't update stats here, as this is handled by the removal listeners for the tiers. 
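For illustration, the per-tier dimension lists built above (heapDimensionValues, diskDimensionValues) append the tier value as the innermost dimension after the key's own dimension values. A minimal sketch of what such a helper could look like; the class name and exact signature are illustrative placeholders, not necessarily the real getDimensionsWithTierValue implementation:

import java.util.ArrayList;
import java.util.List;

class TierDimensionExample {
    // e.g. ["my_index", "shard_0"] + "disk" -> ["my_index", "shard_0", "disk"]
    static List<String> dimensionsWithTierValue(List<String> keyDimensionValues, String tierValue) {
        List<String> result = new ArrayList<>(keyDimensionValues); // copy so the key's own list is untouched
        result.add(tierValue); // the tier is the innermost (last) dimension
        return result;
    }
}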
try (ReleasableLock ignore = writeLock.acquire()) { - // for (Tuple, String> pair : cacheAndTierValueList) { - for (Map.Entry, String> cacheEntry : tierValueMap.entrySet()) { + for (Map.Entry, TierInfo> cacheEntry : caches.entrySet()) { if (key.getDropStatsForDimensions()) { - List dimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, cacheEntry.getValue()); + List dimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, cacheEntry.getValue().tierName); statsHolder.removeDimensions(dimensionValues); } if (key.key != null) { @@ -213,7 +243,7 @@ public void invalidate(ICacheKey key) { @Override public void invalidateAll() { try (ReleasableLock ignore = writeLock.acquire()) { - for (Map.Entry, Boolean> cacheEntry : caches.entrySet()) { + for (Map.Entry, TierInfo> cacheEntry : caches.entrySet()) { cacheEntry.getKey().invalidateAll(); } } @@ -228,7 +258,7 @@ public void invalidateAll() { @Override public Iterable> keys() { List>> iterableList = new ArrayList<>(); - for (Map.Entry, Boolean> cacheEntry : caches.entrySet()) { + for (Map.Entry, TierInfo> cacheEntry : caches.entrySet()) { iterableList.add(cacheEntry.getKey().keys()); } Iterable>[] iterables = (Iterable>[]) iterableList.toArray(new Iterable[0]); @@ -245,7 +275,7 @@ public long count() { @Override public void refresh() { try (ReleasableLock ignore = writeLock.acquire()) { - for (Map.Entry, Boolean> cacheEntry : caches.entrySet()) { + for (Map.Entry, TierInfo> cacheEntry : caches.entrySet()) { cacheEntry.getKey().refresh(); } } @@ -253,7 +283,7 @@ public void refresh() { @Override public void close() throws IOException { - for (Map.Entry, Boolean> cacheEntry : caches.entrySet()) { + for (Map.Entry, TierInfo> cacheEntry : caches.entrySet()) { // Close all the caches here irrespective of whether they are enabled or not. cacheEntry.getKey().close(); } @@ -264,19 +294,26 @@ public ImmutableCacheStatsHolder stats() { return statsHolder.getImmutableCacheStatsHolder(); } - private Function, V> getValueFromTieredCache() { + /** + * Get a value from the tiered cache, and the name of the tier it was found in. + * @param captureStats Whether to record hits/misses for this call of the function + * @return A tuple of the value and the name of the tier it was found in. 
+ */ + private Function, Tuple> getValueFromTieredCache(boolean captureStats) { return key -> { try (ReleasableLock ignore = readLock.acquire()) { - for (Map.Entry, Boolean> cacheEntry : caches.entrySet()) { - if (cacheEntry.getValue()) { + for (Map.Entry, TierInfo> cacheEntry : caches.entrySet()) { + if (cacheEntry.getValue().isEnabled) { V value = cacheEntry.getKey().get(key); // Get the tier value corresponding to this cache - String tierValue = tierValueMap.get(cacheEntry.getKey()); + String tierValue = cacheEntry.getValue().tierName; List dimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, tierValue); if (value != null) { - statsHolder.incrementHits(dimensionValues); - return value; - } else { + if (captureStats) { + statsHolder.incrementHits(dimensionValues); + } + return new Tuple<>(value, tierValue); + } else if (captureStats) { statsHolder.incrementMisses(dimensionValues); } } @@ -289,7 +326,7 @@ private Function, V> getValueFromTieredCache() { void handleRemovalFromHeapTier(RemovalNotification, V> notification) { ICacheKey key = notification.getKey(); boolean wasEvicted = SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()); - if (caches.get(diskCache) && wasEvicted && evaluatePolicies(notification.getValue())) { + if (caches.get(diskCache).isEnabled && wasEvicted && evaluatePolicies(notification.getValue())) { try (ReleasableLock ignore = writeLock.acquire()) { diskCache.put(key, notification.getValue()); // spill over to the disk tier and increment its stats updateStatsOnPut(TIER_DIMENSION_VALUE_DISK, key, notification.getValue()); @@ -426,6 +463,16 @@ public void remove() { } } + private class TierInfo { + boolean isEnabled; + String tierName; + + TierInfo(boolean isEnabled, String tierName) { + this.isEnabled = isEnabled; + this.tierName = tierName; + } + } + /** * Factory to create TieredSpilloverCache objects. */ diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index 735969c5bb0e1..ce5f85873347a 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -388,7 +388,8 @@ public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { assertFalse(loadAwareCacheLoader.isLoaded()); } } - for (int iter = 0; iter < randomIntBetween(50, 200); iter++) { + int numRandom = randomIntBetween(50, 200); + for (int iter = 0; iter < numRandom; iter++) { // Hit cache with randomized key which is expected to miss cache always. LoadAwareCacheLoader, String> tieredCacheLoader = getLoadAwareCacheLoader(); tieredSpilloverCache.computeIfAbsent(getICacheKey(UUID.randomUUID().toString()), tieredCacheLoader); @@ -812,6 +813,9 @@ public String load(ICacheKey key) { } } assertEquals(1, numberOfTimesKeyLoaded); // It should be loaded only once. 
+ // We should see only one heap miss, and the rest hits + assertEquals(1, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); + assertEquals(numberOfSameKeys - 1, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP)); } public void testConcurrencyForEvictionFlowFromOnHeapToDiskTier() throws Exception { diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java index 64dad9c36b14b..b30843ac53929 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java +++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java @@ -195,9 +195,9 @@ public String getCacheName() { return NAME; } - private boolean useNoopStats(Settings settings, boolean configUseNoopStats) { + private boolean useNoopStats(Settings settings, boolean useNoopStatsConfig) { // Use noop stats when pluggable caching is off, or when explicitly set in the CacheConfig - return !FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || configUseNoopStats; + return !FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || useNoopStatsConfig; } } From 4ffef167485ea8f196a466a544a876e5417fc2cd Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 29 Apr 2024 14:42:01 -0700 Subject: [PATCH 15/20] Addressed Sagar's followup comments Signed-off-by: Peter Alfonsi --- .../common/tier/TieredSpilloverCache.java | 39 ++++++++++--------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 4047d0aaefc71..a54ed12d9e615 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -35,6 +35,7 @@ import java.util.Map; import java.util.NoSuchElementException; import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Function; @@ -197,7 +198,7 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V> // if present updateStatsOnPut(TIER_DIMENSION_VALUE_ON_HEAP, key, value); statsHolder.incrementMisses(heapDimensionValues); - if (caches.get(diskCache).isEnabled) { + if (caches.get(diskCache).isEnabled()) { statsHolder.incrementMisses(diskDimensionValues); } } else { @@ -205,14 +206,12 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V> statsHolder.incrementHits(heapDimensionValues); } return value; - } - - else { + } else { // Handle stats for an initial hit from getValueFromTieredCache() if (cacheValueTuple.v2().equals(TIER_DIMENSION_VALUE_ON_HEAP)) { // A hit for the heap tier statsHolder.incrementHits(heapDimensionValues); - } else { + } else if (cacheValueTuple.v2().equals(TIER_DIMENSION_VALUE_DISK)) { // Miss for the heap tier, hit for the disk tier statsHolder.incrementMisses(heapDimensionValues); statsHolder.incrementHits(diskDimensionValues); @@ -227,13 +226,13 @@ public void invalidate(ICacheKey key) { // Doing this as we don't know where it is located. We could do a get from both and check that, but what will // also trigger a hit/miss listener event, so ignoring it for now. 
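To make the accounting rule concrete: for concurrent computeIfAbsent() calls on the same key, exactly one miss per enabled tier is recorded for the thread whose loader actually ran, and every caller that finds the value already loaded counts as a heap hit, which is what the test above asserts. A minimal sketch of that rule, using hypothetical per-tier counters (TierCounters and the method name are placeholders, not classes from this patch):

final class TierCounters {
    long hits;
    long misses;
}

final class ComputeIfAbsentAccountingSketch {
    // loadedByThisThread corresponds to loader.isLoaded() in the code above.
    static void record(boolean loadedByThisThread, boolean diskTierEnabled, TierCounters heap, TierCounters disk) {
        if (loadedByThisThread) {
            // The value was absent from every tier: one miss for the heap tier, and one for the disk tier if enabled.
            heap.misses++;
            if (diskTierEnabled) {
                disk.misses++;
            }
        } else {
            // Another concurrent request already loaded the value onto the heap, so this call counts as a heap hit.
            heap.hits++;
        }
    }
}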
// We don't update stats here, as this is handled by the removal listeners for the tiers. - try (ReleasableLock ignore = writeLock.acquire()) { - for (Map.Entry, TierInfo> cacheEntry : caches.entrySet()) { - if (key.getDropStatsForDimensions()) { - List dimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, cacheEntry.getValue().tierName); - statsHolder.removeDimensions(dimensionValues); - } - if (key.key != null) { + for (Map.Entry, TierInfo> cacheEntry : caches.entrySet()) { + if (key.getDropStatsForDimensions()) { + List dimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, cacheEntry.getValue().tierName); + statsHolder.removeDimensions(dimensionValues); + } + if (key.key != null) { + try (ReleasableLock ignore = writeLock.acquire()) { cacheEntry.getKey().invalidate(key); } } @@ -303,7 +302,7 @@ private Function, Tuple> getValueFromTieredCache(boolean return key -> { try (ReleasableLock ignore = readLock.acquire()) { for (Map.Entry, TierInfo> cacheEntry : caches.entrySet()) { - if (cacheEntry.getValue().isEnabled) { + if (cacheEntry.getValue().isEnabled()) { V value = cacheEntry.getKey().get(key); // Get the tier value corresponding to this cache String tierValue = cacheEntry.getValue().tierName; @@ -326,11 +325,11 @@ private Function, Tuple> getValueFromTieredCache(boolean void handleRemovalFromHeapTier(RemovalNotification, V> notification) { ICacheKey key = notification.getKey(); boolean wasEvicted = SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason()); - if (caches.get(diskCache).isEnabled && wasEvicted && evaluatePolicies(notification.getValue())) { + if (caches.get(diskCache).isEnabled() && wasEvicted && evaluatePolicies(notification.getValue())) { try (ReleasableLock ignore = writeLock.acquire()) { diskCache.put(key, notification.getValue()); // spill over to the disk tier and increment its stats - updateStatsOnPut(TIER_DIMENSION_VALUE_DISK, key, notification.getValue()); } + updateStatsOnPut(TIER_DIMENSION_VALUE_DISK, key, notification.getValue()); } else { // If the value is not going to the disk cache, send this notification to the TSC's removal listener // as the value is leaving the TSC entirely @@ -464,13 +463,17 @@ public void remove() { } private class TierInfo { - boolean isEnabled; - String tierName; + AtomicBoolean isEnabled; + final String tierName; TierInfo(boolean isEnabled, String tierName) { - this.isEnabled = isEnabled; + this.isEnabled = new AtomicBoolean(isEnabled); this.tierName = tierName; } + + boolean isEnabled() { + return isEnabled.get(); + } } /** From 8b9dd703d84c8595913d769f7135cb9c7828ba2d Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 29 Apr 2024 15:22:54 -0700 Subject: [PATCH 16/20] cleanup Signed-off-by: Peter Alfonsi --- .../common/cache/stats/DefaultCacheStatsHolder.java | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java index 135ed5ad4f0a3..2770f1cd6e398 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java @@ -298,17 +298,6 @@ Node createChild(String dimensionValue, boolean createMapInChild) { return children.computeIfAbsent(dimensionValue, (key) -> new Node(dimensionValue, createMapInChild)); } - /*ImmutableCacheStatsHolder.Node snapshot() { - TreeMap snapshotChildren = null; 
- if (!children.isEmpty()) { - snapshotChildren = new TreeMap<>(); - for (Node child : children.values()) { - snapshotChildren.put(child.getDimensionValue(), child.snapshot()); - } - } - return new ImmutableCacheStatsHolder.Node(dimensionValue, snapshotChildren, getImmutableStats()); - }*/ - /** * Return whether this is a leaf node which is at the lowest level of the tree. * Does not return true if this is a node at a higher level whose children are still being constructed. From 0238ad9796c3a007103ea1d010b567a39cc37e12 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 29 Apr 2024 16:11:24 -0700 Subject: [PATCH 17/20] rerun Signed-off-by: Peter Alfonsi From 2a5f5e209fbc1b316918ff2d519ab8583ddccb07 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 29 Apr 2024 17:13:28 -0700 Subject: [PATCH 18/20] rerun Signed-off-by: Peter Alfonsi From 8bb6942e6c09c44cb92cd4281b052c6d7bdee47c Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Tue, 30 Apr 2024 11:49:46 -0700 Subject: [PATCH 19/20] Refactored useNoopStats to its opposite statsTrackingEnabled Signed-off-by: Peter Alfonsi --- .../common/tier/TieredSpilloverCache.java | 4 +-- .../cache/common/tier/MockDiskCache.java | 26 +++++++++---------- .../tier/TieredSpilloverCacheTests.java | 6 ++++- .../cache/store/disk/EhcacheDiskCache.java | 10 +++---- .../store/disk/EhCacheDiskCacheTests.java | 4 +-- .../cache/store/OpenSearchOnHeapCache.java | 20 +++++++------- .../cache/store/builders/ICacheBuilder.java | 10 +++---- .../cache/store/config/CacheConfig.java | 14 +++++----- .../store/OpenSearchOnHeapCacheTests.java | 14 +++++----- 9 files changed, 56 insertions(+), 52 deletions(-) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 0b4a7c6a308be..9942651ccdd67 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -107,7 +107,7 @@ public class TieredSpilloverCache implements ICache { .setMaxSizeInBytes(builder.cacheConfig.getMaxSizeInBytes()) .setExpireAfterAccess(builder.cacheConfig.getExpireAfterAccess()) .setClusterSettings(builder.cacheConfig.getClusterSettings()) - .setUseNoopStats(true) + .setStatsTrackingEnabled(false) .build(), builder.cacheType, builder.cacheFactories @@ -120,7 +120,7 @@ public class TieredSpilloverCache implements ICache { .setSettings(builder.cacheConfig.getSettings()) .setWeigher(builder.cacheConfig.getWeigher()) .setDimensionNames(builder.cacheConfig.getDimensionNames()) - .setUseNoopStats(true) + .setStatsTrackingEnabled(false) .build(), builder.cacheType, builder.cacheFactories diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java index 6486e4d990b10..2058faa5181b1 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java @@ -36,17 +36,18 @@ public class MockDiskCache implements ICache { long delay; private final RemovalListener, V> removalListener; - private final CacheStatsHolder statsHolder; // Only update for number of entries; this is only used to test useNoopStats logic in TSC + private final CacheStatsHolder statsHolder; // Only 
update for number of entries; this is only used to test statsTrackingEnabled logic + // in TSC - public MockDiskCache(int maxSize, long delay, RemovalListener, V> removalListener, boolean useNoopStats) { + public MockDiskCache(int maxSize, long delay, RemovalListener, V> removalListener, boolean statsTrackingEnabled) { this.maxSize = maxSize; this.delay = delay; this.removalListener = removalListener; this.cache = new ConcurrentHashMap, V>(); - if (useNoopStats) { - this.statsHolder = NoopCacheStatsHolder.getInstance(); - } else { + if (statsTrackingEnabled) { this.statsHolder = new DefaultCacheStatsHolder(List.of(), "mock_disk_cache"); + } else { + this.statsHolder = NoopCacheStatsHolder.getInstance(); } } @@ -109,8 +110,8 @@ public void refresh() {} @Override public ImmutableCacheStatsHolder stats() { - // To allow testing of useNoopStats logic in TSC, return a dummy ImmutableCacheStatsHolder with the - // right number of entries, unless useNoopStats is true + // To allow testing of statsTrackingEnabled logic in TSC, return a dummy ImmutableCacheStatsHolder with the + // right number of entries, unless statsTrackingEnabled is false return statsHolder.getImmutableCacheStatsHolder(null); } @@ -129,12 +130,12 @@ public static class MockDiskCacheFactory implements Factory { public static final String NAME = "mockDiskCache"; final long delay; final int maxSize; - final boolean useNoopStats; + final boolean statsTrackingEnabled; - public MockDiskCacheFactory(long delay, int maxSize, boolean useNoopStats) { + public MockDiskCacheFactory(long delay, int maxSize, boolean statsTrackingEnabled) { this.delay = delay; this.maxSize = maxSize; - this.useNoopStats = useNoopStats; + this.statsTrackingEnabled = statsTrackingEnabled; } @Override @@ -145,7 +146,7 @@ public ICache create(CacheConfig config, CacheType cacheType, .setMaxSize(maxSize) .setDeliberateDelay(delay) .setRemovalListener(config.getRemovalListener()) - .setUseNoopStats(config.getUseNoopStats()) + .setStatsTrackingEnabled(config.getStatsTrackingEnabled()) .build(); } @@ -164,8 +165,7 @@ public static class Builder extends ICacheBuilder { @Override public ICache build() { - boolean useNoopStats = getUseNoopStats(); - return new MockDiskCache(this.maxSize, this.delay, this.getRemovalListener(), getUseNoopStats()); + return new MockDiskCache(this.maxSize, this.delay, this.getRemovalListener(), getStatsTrackingEnabled()); } public Builder setMaxSize(int maxSize) { diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index ddc7bb6fd4aa8..6d5ee91326338 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -49,6 +49,7 @@ import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_NAME; import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK; import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP; import static 
org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY; @@ -1463,7 +1464,10 @@ private long getItemsForTier(TieredSpilloverCache tsc, String tierValue) t } private ImmutableCacheStats getStatsSnapshotForTier(TieredSpilloverCache tsc, String tierValue) throws IOException { - ImmutableCacheStatsHolder cacheStats = tsc.stats(); + List levelsList = new ArrayList<>(dimensionNames); + levelsList.add(TIER_DIMENSION_NAME); + String[] levels = levelsList.toArray(new String[0]); + ImmutableCacheStatsHolder cacheStats = tsc.stats(levels); // Since we always use the same list of dimensions from getMockDimensions() in keys for these tests, we can get all the stats values // for a given tier with a single node in MDCS List mockDimensions = getMockDimensions(); diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index 430e8526d149d..9a4dce1067b61 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -164,12 +164,12 @@ private EhcacheDiskCache(Builder builder) { this.ehCacheEventListener = new EhCacheEventListener(builder.getRemovalListener(), builder.getWeigher()); this.cache = buildCache(Duration.ofMillis(expireAfterAccess.getMillis()), builder); List dimensionNames = Objects.requireNonNull(builder.dimensionNames, "Dimension names can't be null"); - if (builder.getUseNoopStats()) { - this.cacheStatsHolder = NoopCacheStatsHolder.getInstance(); - } else { + if (builder.getStatsTrackingEnabled()) { // If this cache is being used, FeatureFlags.PLUGGABLE_CACHE is already on, so we can always use the DefaultCacheStatsHolder - // unless useNoopStats is explicitly set in CacheConfig. + // unless statsTrackingEnabled is explicitly set to false in CacheConfig. this.cacheStatsHolder = new DefaultCacheStatsHolder(dimensionNames, EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME); + } else { + this.cacheStatsHolder = NoopCacheStatsHolder.getInstance(); } } @@ -420,7 +420,7 @@ public Iterable> keys() { /** * Gives the current count of keys in disk cache. - * If useNoopStats is set to true in the builder, always returns 0. + * If enableStatsTracking is set to false in the builder, always returns 0. 
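The stats-holder wiring shown in MockDiskCache and EhcacheDiskCache above follows a null-object pattern: when stats tracking is disabled, a shared no-op holder is substituted so the read/write paths pay no bookkeeping cost. A minimal sketch of the idea with placeholder types; these are illustrative stand-ins, not the real DefaultCacheStatsHolder or NoopCacheStatsHolder classes:

interface StatsHolderSketch {
    void incrementHits();
}

final class CountingStatsHolder implements StatsHolderSketch {
    private long hits;
    public void incrementHits() {
        hits++;
    }
}

final class NoopStatsHolderSketch implements StatsHolderSketch {
    private static final NoopStatsHolderSketch INSTANCE = new NoopStatsHolderSketch();
    static NoopStatsHolderSketch getInstance() {
        return INSTANCE;
    }
    public void incrementHits() {
        // intentionally a no-op
    }
}

// Constructor-time selection, mirroring the caches above:
// this.statsHolder = statsTrackingEnabled ? new CountingStatsHolder() : NoopStatsHolderSketch.getInstance();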
* @return current count of keys */ @Override diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java index e03787d5abf28..29551befd3e9f 100644 --- a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java @@ -850,7 +850,7 @@ public void testInvalidateWithDropDimensions() throws Exception { } } - public void testNoopStats() throws Exception { + public void testStatsTrackingDisabled() throws Exception { Settings settings = Settings.builder().build(); MockRemovalListener removalListener = new MockRemovalListener<>(); ToLongBiFunction, String> weigher = getWeigher(); @@ -869,7 +869,7 @@ public void testNoopStats() throws Exception { .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) .setRemovalListener(removalListener) .setWeigher(weigher) - .setUseNoopStats(true) + .setStatsTrackingEnabled(false) .build(); int randomKeys = randomIntBetween(10, 100); for (int i = 0; i < randomKeys; i++) { diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java index 1022654d89b4b..569653bec2a3d 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java +++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java @@ -53,7 +53,7 @@ public class OpenSearchOnHeapCache implements ICache, RemovalListene private final RemovalListener, V> removalListener; private final List dimensionNames; private final ToLongBiFunction, V> weigher; - private final boolean useNoopStats; + private final boolean statsTrackingEnabled; public OpenSearchOnHeapCache(Builder builder) { CacheBuilder, V> cacheBuilder = CacheBuilder., V>builder() @@ -65,11 +65,11 @@ public OpenSearchOnHeapCache(Builder builder) { } cache = cacheBuilder.build(); this.dimensionNames = Objects.requireNonNull(builder.dimensionNames, "Dimension names can't be null"); - this.useNoopStats = builder.getUseNoopStats(); - if (useNoopStats) { - this.cacheStatsHolder = NoopCacheStatsHolder.getInstance(); - } else { + this.statsTrackingEnabled = builder.getStatsTrackingEnabled(); + if (statsTrackingEnabled) { this.cacheStatsHolder = new DefaultCacheStatsHolder(dimensionNames, OpenSearchOnHeapCacheFactory.NAME); + } else { + this.cacheStatsHolder = NoopCacheStatsHolder.getInstance(); } this.removalListener = builder.getRemovalListener(); this.weigher = builder.getWeigher(); @@ -171,9 +171,9 @@ public static class OpenSearchOnHeapCacheFactory implements Factory { public ICache create(CacheConfig config, CacheType cacheType, Map cacheFactories) { Map> settingList = OpenSearchOnHeapCacheSettings.getSettingListForCacheType(cacheType); Settings settings = config.getSettings(); - boolean useNoopStats = useNoopStats(config.getSettings(), config.getUseNoopStats()); + boolean statsTrackingEnabled = statsTrackingEnabled(config.getSettings(), config.getStatsTrackingEnabled()); ICacheBuilder builder = new Builder().setDimensionNames(config.getDimensionNames()) - .setUseNoopStats(useNoopStats) + .setStatsTrackingEnabled(statsTrackingEnabled) .setMaximumWeightInBytes(((ByteSizeValue) settingList.get(MAXIMUM_SIZE_IN_BYTES_KEY).get(settings)).getBytes()) .setExpireAfterAccess(((TimeValue) 
settingList.get(EXPIRE_AFTER_ACCESS_KEY).get(settings))) .setWeigher(config.getWeigher()) @@ -195,9 +195,9 @@ public String getCacheName() { return NAME; } - private boolean useNoopStats(Settings settings, boolean useNoopStatsConfig) { - // Use noop stats when pluggable caching is off, or when explicitly set in the CacheConfig - return !FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || useNoopStatsConfig; + private boolean statsTrackingEnabled(Settings settings, boolean statsTrackingEnabledConfig) { + // Don't track stats when pluggable caching is off, or when explicitly set to false in the CacheConfig + return FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) && statsTrackingEnabledConfig; } } diff --git a/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java b/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java index 66c7aa9e709d7..a308d1db88258 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java +++ b/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java @@ -37,7 +37,7 @@ public abstract class ICacheBuilder { private RemovalListener, V> removalListener; - private boolean useNoopStats; + private boolean statsTrackingEnabled = true; public ICacheBuilder() {} @@ -66,8 +66,8 @@ public ICacheBuilder setRemovalListener(RemovalListener, V> r return this; } - public ICacheBuilder setUseNoopStats(boolean useNoopStats) { - this.useNoopStats = useNoopStats; + public ICacheBuilder setStatsTrackingEnabled(boolean statsTrackingEnabled) { + this.statsTrackingEnabled = statsTrackingEnabled; return this; } @@ -91,8 +91,8 @@ public Settings getSettings() { return settings; } - public boolean getUseNoopStats() { - return useNoopStats; + public boolean getStatsTrackingEnabled() { + return statsTrackingEnabled; } public abstract ICache build(); diff --git a/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java b/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java index 60d524ee51401..0c54ac57a9b18 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java +++ b/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java @@ -68,7 +68,7 @@ public class CacheConfig { private final ClusterSettings clusterSettings; - private final boolean useNoopStats; + private final boolean statsTrackingEnabled; private CacheConfig(Builder builder) { this.keyType = builder.keyType; @@ -83,7 +83,7 @@ private CacheConfig(Builder builder) { this.maxSizeInBytes = builder.maxSizeInBytes; this.expireAfterAccess = builder.expireAfterAccess; this.clusterSettings = builder.clusterSettings; - this.useNoopStats = builder.useNoopStats; + this.statsTrackingEnabled = builder.statsTrackingEnabled; } public Class getKeyType() { @@ -134,8 +134,8 @@ public ClusterSettings getClusterSettings() { return clusterSettings; } - public boolean getUseNoopStats() { - return useNoopStats; + public boolean getStatsTrackingEnabled() { + return statsTrackingEnabled; } /** @@ -162,7 +162,7 @@ public static class Builder { private TimeValue expireAfterAccess; private ClusterSettings clusterSettings; - private boolean useNoopStats; + private boolean statsTrackingEnabled = true; public Builder() {} @@ -226,8 +226,8 @@ public Builder setClusterSettings(ClusterSettings clusterSettings) { return this; } - public Builder setUseNoopStats(boolean useNoopStats) { - this.useNoopStats = useNoopStats; + public Builder 
setStatsTrackingEnabled(boolean statsTrackingEnabled) { + this.statsTrackingEnabled = statsTrackingEnabled; return this; } diff --git a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java index 217c5d7203111..f227db6fee2d1 100644 --- a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java +++ b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java @@ -39,7 +39,7 @@ public void testStats() throws Exception { MockRemovalListener listener = new MockRemovalListener<>(); int maxKeys = between(10, 50); int numEvicted = between(10, 20); - OpenSearchOnHeapCache cache = getCache(maxKeys, listener, true, false); + OpenSearchOnHeapCache cache = getCache(maxKeys, listener, true, true); // When the pluggable caches setting is on, we should get stats as expected from cache.stats(). @@ -82,14 +82,14 @@ public void testStats() throws Exception { } public void testStatsWithoutPluggableCaches() throws Exception { - // When the pluggable caches setting is off, or when we manually set useNoopStats = true in the config, + // When the pluggable caches setting is off, or when we manually set statsTrackingEnabled = false in the config, // we should get all-zero stats from cache.stats(), but count() should still work. MockRemovalListener listener = new MockRemovalListener<>(); int maxKeys = between(10, 50); int numEvicted = between(10, 20); - OpenSearchOnHeapCache pluggableCachesOffCache = getCache(maxKeys, listener, false, false); - OpenSearchOnHeapCache manuallySetNoopStatsCache = getCache(maxKeys, listener, true, true); + OpenSearchOnHeapCache pluggableCachesOffCache = getCache(maxKeys, listener, false, true); + OpenSearchOnHeapCache manuallySetNoopStatsCache = getCache(maxKeys, listener, true, false); List> caches = List.of(pluggableCachesOffCache, manuallySetNoopStatsCache); for (OpenSearchOnHeapCache cache : caches) { @@ -113,7 +113,7 @@ private OpenSearchOnHeapCache getCache( int maxSizeKeys, MockRemovalListener listener, boolean pluggableCachesSetting, - boolean useNoopStatsInConfig + boolean statsTrackingEnabled ) { ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(); Settings settings = Settings.builder() @@ -133,7 +133,7 @@ private OpenSearchOnHeapCache getCache( .setSettings(settings) .setDimensionNames(dimensionNames) .setMaxSizeInBytes(maxSizeKeys * keyValueSize) - .setUseNoopStats(useNoopStatsInConfig) + .setStatsTrackingEnabled(statsTrackingEnabled) .build(); return (OpenSearchOnHeapCache) onHeapCacheFactory.create(cacheConfig, CacheType.INDICES_REQUEST_CACHE, null); } @@ -141,7 +141,7 @@ private OpenSearchOnHeapCache getCache( public void testInvalidateWithDropDimensions() throws Exception { MockRemovalListener listener = new MockRemovalListener<>(); int maxKeys = 50; - OpenSearchOnHeapCache cache = getCache(maxKeys, listener, true, false); + OpenSearchOnHeapCache cache = getCache(maxKeys, listener, true, true); List> keysAdded = new ArrayList<>(); From ef2bffa6c2f6330cca7748c2900d409a5b71bde1 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Tue, 30 Apr 2024 11:55:26 -0700 Subject: [PATCH 20/20] Reworded description comment Signed-off-by: Peter Alfonsi --- .../tier/TieredSpilloverCacheStatsHolder.java | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git 
a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java index e60e143634031..d17059e8dee94 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsHolder.java @@ -15,12 +15,17 @@ import java.util.function.Consumer; /** - * A tier-aware version of DefaultCacheStatsHolder. Overrides the incrementer functions, as we cannot just add the on-heap - * and disk stats to get a total for the cache as a whole. For example, if the heap tier has 5 misses and the disk tier - * has 4, the total cache has had 4 misses, not 9. The same goes for evictions. Other stats values add normally. + * A tier-aware version of DefaultCacheStatsHolder. Overrides the incrementer functions, as we can't just add the on-heap + * and disk stats to get a total for the cache as a whole. If the disk tier is present, the total hits, size, and entries + * should be the sum of both tiers' values, but the total misses and evictions should be the disk tier's values. + * When the disk tier isn't present, on-heap misses and evictions should contribute to the total. + * + * For example, if the heap tier has 5 misses and the disk tier has 4, the total cache has had 4 misses, not 9. + * The same goes for evictions. Other stats values add normally. + * * This means for misses and evictions, if we are incrementing for the on-heap tier and the disk tier is present, - * we have to increment only the leaf nodes corresponding to the on-heap tier itself, and not its ancestors. - * If the disk tier is not present, we do increment the ancestor nodes. + * we have to increment only the leaf nodes corresponding to the on-heap tier itself, and not its ancestors, + * which correspond to totals including both tiers. If the disk tier is not present, we do increment the ancestor nodes. */ public class TieredSpilloverCacheStatsHolder extends DefaultCacheStatsHolder {
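A small numeric sketch of the aggregation rule described in the javadoc above (the class and method names are illustrative): when the disk tier is present, the cache-level miss total comes from the disk tier alone rather than from summing both tiers.

final class TierMissTotalSketch {
    static long totalMisses(long heapMisses, long diskMisses, boolean diskTierPresent) {
        // A heap miss that is then served from disk is not a miss for the cache as a whole,
        // so heap misses do not contribute to the total when the disk tier is present.
        return diskTierPresent ? diskMisses : heapMisses;
    }
}
// totalMisses(5, 4, true)  -> 4   (the javadoc's example: 4 total misses, not 9)
// totalMisses(5, 0, false) -> 5   (no disk tier: the heap tier's misses are the cache's misses)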