From 85fd7b5e98cff85861a21d38e9e8fcfabebd02f4 Mon Sep 17 00:00:00 2001 From: Jackie Tien Date: Thu, 14 Nov 2024 09:46:39 +0800 Subject: [PATCH] [To dev/1.3] Add cache hit situation and actual io size for BloomFilter, TimeSeriesMetadata(including ChunkMetadatList) and Chunk (#14082) --- .../fragment/FragmentInstanceContext.java | 19 ++ .../execution/fragment/QueryStatistics.java | 65 ++++- .../operator/source/FileLoaderUtils.java | 16 +- .../metric/SeriesScanCostMetricSet.java | 233 +++++++++++++++++- .../FragmentInstanceStatisticsDrawer.java | 64 +++++ .../buffer/BloomFilterCache.java | 80 ++++-- .../db/storageengine/buffer/ChunkCache.java | 125 ++++++++-- .../buffer/TimeSeriesMetadataCache.java | 65 ++++- .../read/control/FileReaderManager.java | 22 +- .../reader/chunk/DiskAlignedChunkLoader.java | 6 +- .../read/reader/chunk/DiskChunkLoader.java | 7 +- .../storageengine/buffer/ChunkCacheTest.java | 6 +- .../compaction/TestUtilsForAlignedSeries.java | 6 +- .../exception/IoTDBIORuntimeException.java | 33 +++ .../commons/service/metric/enums/Metric.java | 3 + .../src/main/thrift/datanode.thrift | 13 + pom.xml | 2 +- 17 files changed, 681 insertions(+), 84 deletions(-) create mode 100644 iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/exception/IoTDBIORuntimeException.java diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceContext.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceContext.java index 92ea44735c87..22077803e97a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceContext.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/FragmentInstanceContext.java @@ -654,6 +654,13 @@ public synchronized void releaseResource() { long durationTime = System.currentTimeMillis() - executionStartTime.get(); QueryRelatedResourceMetricSet.getInstance().updateFragmentInstanceTime(durationTime); + SeriesScanCostMetricSet.getInstance() + .recordBloomFilterMetrics( + getQueryStatistics().getLoadBloomFilterFromCacheCount().get(), + getQueryStatistics().getLoadBloomFilterFromDiskCount().get(), + getQueryStatistics().getLoadBloomFilterActualIOSize().get(), + getQueryStatistics().getLoadBloomFilterTime().get()); + SeriesScanCostMetricSet.getInstance() .recordNonAlignedTimeSeriesMetadataCount( getQueryStatistics().getLoadTimeSeriesMetadataDiskSeqCount().get(), @@ -679,6 +686,12 @@ public synchronized void releaseResource() { getQueryStatistics().getLoadTimeSeriesMetadataAlignedMemSeqTime().get(), getQueryStatistics().getLoadTimeSeriesMetadataAlignedMemUnSeqTime().get()); + SeriesScanCostMetricSet.getInstance() + .recordTimeSeriesMetadataMetrics( + getQueryStatistics().getLoadTimeSeriesMetadataFromCacheCount().get(), + getQueryStatistics().getLoadTimeSeriesMetadataFromDiskCount().get(), + getQueryStatistics().getLoadTimeSeriesMetadataActualIOSize().get()); + SeriesScanCostMetricSet.getInstance() .recordConstructChunkReadersCount( getQueryStatistics().getConstructAlignedChunkReadersMemCount().get(), @@ -692,6 +705,12 @@ public synchronized void releaseResource() { getQueryStatistics().getConstructNonAlignedChunkReadersMemTime().get(), getQueryStatistics().getConstructNonAlignedChunkReadersDiskTime().get()); + SeriesScanCostMetricSet.getInstance() + .recordChunkMetrics( + getQueryStatistics().getLoadChunkFromCacheCount().get(), + 
getQueryStatistics().getLoadChunkFromDiskCount().get(), + getQueryStatistics().getLoadChunkActualIOSize().get()); + SeriesScanCostMetricSet.getInstance() .recordPageReadersDecompressCount( getQueryStatistics().getPageReadersDecodeAlignedMemCount().get(), diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/QueryStatistics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/QueryStatistics.java index b996569a69db..b1ddfdbf4d74 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/QueryStatistics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/fragment/QueryStatistics.java @@ -29,6 +29,11 @@ */ public class QueryStatistics { + private final AtomicLong loadBloomFilterFromCacheCount = new AtomicLong(0); + private final AtomicLong loadBloomFilterFromDiskCount = new AtomicLong(0); + private final AtomicLong loadBloomFilterActualIOSize = new AtomicLong(0); + private final AtomicLong loadBloomFilterTime = new AtomicLong(0); + // statistics for count and time of load timeseriesmetadata private final AtomicLong loadTimeSeriesMetadataDiskSeqCount = new AtomicLong(0); private final AtomicLong loadTimeSeriesMetadataDiskUnSeqCount = new AtomicLong(0); @@ -48,6 +53,10 @@ public class QueryStatistics { private final AtomicLong loadTimeSeriesMetadataAlignedMemSeqTime = new AtomicLong(0); private final AtomicLong loadTimeSeriesMetadataAlignedMemUnSeqTime = new AtomicLong(0); + private final AtomicLong loadTimeSeriesMetadataFromCacheCount = new AtomicLong(0); + private final AtomicLong loadTimeSeriesMetadataFromDiskCount = new AtomicLong(0); + private final AtomicLong loadTimeSeriesMetadataActualIOSize = new AtomicLong(0); + // statistics for count and time of construct chunk readers(disk io and decompress) private final AtomicLong constructNonAlignedChunkReadersDiskCount = new AtomicLong(0); private final AtomicLong constructNonAlignedChunkReadersMemCount = new AtomicLong(0); @@ -59,6 +68,10 @@ public class QueryStatistics { private final AtomicLong constructAlignedChunkReadersDiskTime = new AtomicLong(0); private final AtomicLong constructAlignedChunkReadersMemTime = new AtomicLong(0); + private final AtomicLong loadChunkFromCacheCount = new AtomicLong(0); + private final AtomicLong loadChunkFromDiskCount = new AtomicLong(0); + private final AtomicLong loadChunkActualIOSize = new AtomicLong(0); + // statistics for count and time of page decode private final AtomicLong pageReadersDecodeAlignedDiskCount = new AtomicLong(0); private final AtomicLong pageReadersDecodeAlignedDiskTime = new AtomicLong(0); @@ -225,6 +238,46 @@ public AtomicLong getPageReaderMaxUsedMemorySize() { return pageReaderMaxUsedMemorySize; } + public AtomicLong getLoadBloomFilterActualIOSize() { + return loadBloomFilterActualIOSize; + } + + public AtomicLong getLoadBloomFilterFromCacheCount() { + return loadBloomFilterFromCacheCount; + } + + public AtomicLong getLoadBloomFilterFromDiskCount() { + return loadBloomFilterFromDiskCount; + } + + public AtomicLong getLoadBloomFilterTime() { + return loadBloomFilterTime; + } + + public AtomicLong getLoadChunkActualIOSize() { + return loadChunkActualIOSize; + } + + public AtomicLong getLoadChunkFromCacheCount() { + return loadChunkFromCacheCount; + } + + public AtomicLong getLoadChunkFromDiskCount() { + return loadChunkFromDiskCount; + } + + public AtomicLong getLoadTimeSeriesMetadataActualIOSize() { + return 
loadTimeSeriesMetadataActualIOSize; + } + + public AtomicLong getLoadTimeSeriesMetadataFromCacheCount() { + return loadTimeSeriesMetadataFromCacheCount; + } + + public AtomicLong getLoadTimeSeriesMetadataFromDiskCount() { + return loadTimeSeriesMetadataFromDiskCount; + } + public TQueryStatistics toThrift() { return new TQueryStatistics( loadTimeSeriesMetadataDiskSeqCount.get(), @@ -263,6 +316,16 @@ public TQueryStatistics toThrift() { alignedTimeSeriesMetadataModificationCount.get(), alignedTimeSeriesMetadataModificationTime.get(), nonAlignedTimeSeriesMetadataModificationCount.get(), - nonAlignedTimeSeriesMetadataModificationTime.get()); + nonAlignedTimeSeriesMetadataModificationTime.get(), + loadBloomFilterFromCacheCount.get(), + loadBloomFilterFromDiskCount.get(), + loadBloomFilterActualIOSize.get(), + loadBloomFilterTime.get(), + loadTimeSeriesMetadataFromCacheCount.get(), + loadTimeSeriesMetadataFromDiskCount.get(), + loadTimeSeriesMetadataActualIOSize.get(), + loadChunkFromCacheCount.get(), + loadChunkFromDiskCount.get(), + loadChunkActualIOSize.get()); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java index 0dea0e767782..ab582283b80a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/FileLoaderUtils.java @@ -33,6 +33,7 @@ import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.metadata.MemAlignedChunkMetadataLoader; import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.metadata.MemChunkMetadataLoader; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.ITimeIndex; import org.apache.tsfile.file.metadata.AlignedTimeSeriesMetadata; import org.apache.tsfile.file.metadata.IChunkMetadata; @@ -99,8 +100,9 @@ public static TimeseriesMetadata loadTimeSeriesMetadata( new PlainDeviceID(seriesPath.getDevice()), seriesPath.getMeasurement()), allSensors, - resource.getTimeIndexType() != 1, - context.isDebug()); + resource.getTimeIndexType() == ITimeIndex.FILE_TIME_INDEX_TYPE, + context.isDebug(), + context); if (timeSeriesMetadata != null) { long t2 = System.nanoTime(); List pathModifications = context.getPathModifications(resource, seriesPath); @@ -268,8 +270,9 @@ private static AlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFromDisk( filePath, new TimeSeriesMetadataCacheKey(resource.getTsFileID(), deviceId, ""), allSensors, - resource.getTimeIndexType() != 1, - isDebug); + resource.getTimeIndexType() == ITimeIndex.FILE_TIME_INDEX_TYPE, + isDebug, + context); if (timeColumn != null) { // only need time column, like count_time aggregation if (valueMeasurementList.isEmpty()) { @@ -290,8 +293,9 @@ private static AlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFromDisk( new TimeSeriesMetadataCacheKey( resource.getTsFileID(), deviceId, valueMeasurement), allSensors, - resource.getTimeIndexType() != 1, - isDebug); + resource.getTimeIndexType() == ITimeIndex.FILE_TIME_INDEX_TYPE, + isDebug, + context); exist = (exist || (valueColumn != null)); valueTimeSeriesMetadataList.add(valueColumn); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/metric/SeriesScanCostMetricSet.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/metric/SeriesScanCostMetricSet.java index a50381a75b34..ab6fb12f5963 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/metric/SeriesScanCostMetricSet.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/metric/SeriesScanCostMetricSet.java @@ -24,6 +24,7 @@ import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.impl.DoNothingMetricManager; import org.apache.iotdb.metrics.metricsets.IMetricSet; +import org.apache.iotdb.metrics.type.Counter; import org.apache.iotdb.metrics.type.Histogram; import org.apache.iotdb.metrics.type.Timer; import org.apache.iotdb.metrics.utils.MetricLevel; @@ -46,9 +47,103 @@ public static SeriesScanCostMetricSet getInstance() { public static final String NON_ALIGNED = "non_aligned"; public static final String MEM = "mem"; public static final String DISK = "disk"; + public static final String MEM_AND_DISK = "mem_and_disk"; + public static final String SEQUENCE = "sequence"; public static final String UNSEQUENCE = "unsequence"; + public static final String SEQ_AND_UNSEQ = "seq_and_unseq"; + + public static final String BLOOM_FILTER = "bloom_filter"; + public static final String TIMESERIES_METADATA = "timeseries_metadata"; + public static final String CHUNK = "chunk"; + + private Histogram loadBloomFilterFromCacheCountHistogram = + DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + private Histogram loadBloomFilterFromDiskCountHistogram = + DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + private Counter loadBloomFilterActualIOSizeCounter = DoNothingMetricManager.DO_NOTHING_COUNTER; + private Timer loadBloomFilterTime = DoNothingMetricManager.DO_NOTHING_TIMER; + + public void recordBloomFilterMetrics( + long loadBloomFilterFromCacheCount, + long loadBloomFilterFromDiskCount, + long loadBloomFilterActualIOSize, + long loadBloomFilterNanoTime) { + loadBloomFilterFromCacheCountHistogram.update(loadBloomFilterFromCacheCount); + loadBloomFilterFromDiskCountHistogram.update(loadBloomFilterFromDiskCount); + loadBloomFilterActualIOSizeCounter.inc(loadBloomFilterActualIOSize); + loadBloomFilterTime.updateNanos(loadBloomFilterNanoTime); + } + + private void bindBloomFilter(AbstractMetricService metricService) { + loadBloomFilterFromCacheCountHistogram = + metricService.getOrCreateHistogram( + Metric.METRIC_QUERY_CACHE.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + BLOOM_FILTER, + Tag.FROM.toString(), + CACHE); + loadBloomFilterFromDiskCountHistogram = + metricService.getOrCreateHistogram( + Metric.METRIC_QUERY_CACHE.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + BLOOM_FILTER, + Tag.FROM.toString(), + DISK); + loadBloomFilterActualIOSizeCounter = + metricService.getOrCreateCounter( + Metric.QUERY_DISK_READ.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + BLOOM_FILTER); + loadBloomFilterTime = + metricService.getOrCreateTimer( + Metric.SERIES_SCAN_COST.toString(), + MetricLevel.IMPORTANT, + Tag.STAGE.toString(), + BLOOM_FILTER, + Tag.TYPE.toString(), + SEQ_AND_UNSEQ, + Tag.FROM.toString(), + MEM_AND_DISK); + } + + private void unbindBloomFilter(AbstractMetricService metricService) { + loadBloomFilterFromCacheCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + loadBloomFilterFromDiskCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + loadBloomFilterActualIOSizeCounter = DoNothingMetricManager.DO_NOTHING_COUNTER; + loadBloomFilterTime = 
DoNothingMetricManager.DO_NOTHING_TIMER; + + metricService.remove( + MetricType.HISTOGRAM, + Metric.METRIC_QUERY_CACHE.toString(), + Tag.TYPE.toString(), + BLOOM_FILTER, + Tag.FROM.toString(), + CACHE); + metricService.remove( + MetricType.HISTOGRAM, + Metric.METRIC_QUERY_CACHE.toString(), + Tag.TYPE.toString(), + BLOOM_FILTER, + Tag.FROM.toString(), + DISK); + metricService.remove( + MetricType.COUNTER, Metric.QUERY_DISK_READ.toString(), Tag.TYPE.toString(), BLOOM_FILTER); + metricService.remove( + MetricType.TIMER, + Metric.SERIES_SCAN_COST.toString(), + Tag.STAGE.toString(), + BLOOM_FILTER, + Tag.TYPE.toString(), + SEQ_AND_UNSEQ, + Tag.FROM.toString(), + MEM_AND_DISK); + } + private Histogram loadTimeSeriesMetadataDiskSeqHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; private Histogram loadTimeSeriesMetadataDiskUnSeqHistogram = @@ -72,10 +167,18 @@ public static SeriesScanCostMetricSet getInstance() { private Timer loadTimeSeriesMetadataMemSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; private Timer loadTimeSeriesMetadataMemUnSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; - public Timer loadTimeSeriesMetadataAlignedDiskSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; - public Timer loadTimeSeriesMetadataAlignedDiskUnSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; - public Timer loadTimeSeriesMetadataAlignedMemSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; - public Timer loadTimeSeriesMetadataAlignedMemUnSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; + private Timer loadTimeSeriesMetadataAlignedDiskSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; + private Timer loadTimeSeriesMetadataAlignedDiskUnSeqTime = + DoNothingMetricManager.DO_NOTHING_TIMER; + private Timer loadTimeSeriesMetadataAlignedMemSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; + private Timer loadTimeSeriesMetadataAlignedMemUnSeqTime = DoNothingMetricManager.DO_NOTHING_TIMER; + + private Histogram loadTimeSeriesMetadataFromCacheCountHistogram = + DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + private Histogram loadTimeSeriesMetadataFromDiskCountHistogram = + DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + private Counter loadTimeSeriesMetadataActualIOSizeCounter = + DoNothingMetricManager.DO_NOTHING_COUNTER; public void recordNonAlignedTimeSeriesMetadataCount(long c1, long c2, long c3, long c4) { loadTimeSeriesMetadataDiskSeqHistogram.update(c1); @@ -105,6 +208,15 @@ public void recordAlignedTimeSeriesMetadataTime(long t1, long t2, long t3, long loadTimeSeriesMetadataAlignedMemUnSeqTime.updateNanos(t4); } + public void recordTimeSeriesMetadataMetrics( + long loadTimeSeriesMetadataFromCacheCount, + long loadTimeSeriesMetadataFromDiskCount, + long loadTimeSeriesMetadataActualIOSize) { + loadTimeSeriesMetadataFromCacheCountHistogram.update(loadTimeSeriesMetadataFromCacheCount); + loadTimeSeriesMetadataFromDiskCountHistogram.update(loadTimeSeriesMetadataFromDiskCount); + loadTimeSeriesMetadataActualIOSizeCounter.inc(loadTimeSeriesMetadataActualIOSize); + } + private void bindTimeseriesMetadata(AbstractMetricService metricService) { loadTimeSeriesMetadataDiskSeqHistogram = metricService.getOrCreateHistogram( @@ -273,6 +385,57 @@ private void bindAlignedTimeseriesMetadata(AbstractMetricService metricService) MEM); } + private void bindTimeSeriesMetadataCache(AbstractMetricService metricService) { + loadTimeSeriesMetadataFromCacheCountHistogram = + metricService.getOrCreateHistogram( + Metric.METRIC_QUERY_CACHE.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + TIMESERIES_METADATA, + 
Tag.FROM.toString(), + CACHE); + loadTimeSeriesMetadataFromDiskCountHistogram = + metricService.getOrCreateHistogram( + Metric.METRIC_QUERY_CACHE.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + TIMESERIES_METADATA, + Tag.FROM.toString(), + DISK); + loadTimeSeriesMetadataActualIOSizeCounter = + metricService.getOrCreateCounter( + Metric.QUERY_DISK_READ.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + TIMESERIES_METADATA); + } + + private void unbindTimeSeriesMetadataCache(AbstractMetricService metricService) { + loadTimeSeriesMetadataFromCacheCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + loadTimeSeriesMetadataFromDiskCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + loadTimeSeriesMetadataActualIOSizeCounter = DoNothingMetricManager.DO_NOTHING_COUNTER; + + metricService.remove( + MetricType.HISTOGRAM, + Metric.METRIC_QUERY_CACHE.toString(), + Tag.TYPE.toString(), + TIMESERIES_METADATA, + Tag.FROM.toString(), + CACHE); + metricService.remove( + MetricType.HISTOGRAM, + Metric.METRIC_QUERY_CACHE.toString(), + Tag.TYPE.toString(), + TIMESERIES_METADATA, + Tag.FROM.toString(), + DISK); + metricService.remove( + MetricType.COUNTER, + Metric.QUERY_DISK_READ.toString(), + Tag.TYPE.toString(), + TIMESERIES_METADATA); + } + private void unbindTimeseriesMetadata(AbstractMetricService metricService) { loadTimeSeriesMetadataDiskSeqHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; loadTimeSeriesMetadataDiskUnSeqHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; @@ -744,6 +907,10 @@ private void unbindChunkMetadataFilter(AbstractMetricService metricService) { private Timer constructChunkReadersNonAlignedMemTimer = DoNothingMetricManager.DO_NOTHING_TIMER; private Timer constructChunkReadersNonAlignedDiskTimer = DoNothingMetricManager.DO_NOTHING_TIMER; + private Histogram loadChunkFromCacheCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + private Histogram loadChunkFromDiskCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + private Counter loadChunkActualIOSizeCounter = DoNothingMetricManager.DO_NOTHING_COUNTER; + public void recordConstructChunkReadersCount( long alignedMemCount, long alignedDiskCount, @@ -763,6 +930,58 @@ public void recordConstructChunkReadersTime( constructChunkReadersNonAlignedDiskTimer.updateNanos(nonAlignedDiskTime); } + public void recordChunkMetrics( + long loadChunkFromCacheCount, long loadChunkFromDiskCount, long loadChunkActualIOSize) { + loadChunkFromCacheCountHistogram.update(loadChunkFromCacheCount); + loadChunkFromDiskCountHistogram.update(loadChunkFromDiskCount); + loadChunkActualIOSizeCounter.inc(loadChunkActualIOSize); + } + + private void bindChunk(AbstractMetricService metricService) { + loadChunkFromCacheCountHistogram = + metricService.getOrCreateHistogram( + Metric.METRIC_QUERY_CACHE.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + CHUNK, + Tag.FROM.toString(), + CACHE); + loadChunkFromDiskCountHistogram = + metricService.getOrCreateHistogram( + Metric.METRIC_QUERY_CACHE.toString(), + MetricLevel.IMPORTANT, + Tag.TYPE.toString(), + CHUNK, + Tag.FROM.toString(), + DISK); + loadChunkActualIOSizeCounter = + metricService.getOrCreateCounter( + Metric.QUERY_DISK_READ.toString(), MetricLevel.IMPORTANT, Tag.TYPE.toString(), CHUNK); + } + + private void unbindChunk(AbstractMetricService metricService) { + loadChunkFromCacheCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + loadChunkFromDiskCountHistogram = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; + 
loadChunkActualIOSizeCounter = DoNothingMetricManager.DO_NOTHING_COUNTER; + + metricService.remove( + MetricType.HISTOGRAM, + Metric.METRIC_QUERY_CACHE.toString(), + Tag.TYPE.toString(), + CHUNK, + Tag.FROM.toString(), + CACHE); + metricService.remove( + MetricType.HISTOGRAM, + Metric.METRIC_QUERY_CACHE.toString(), + Tag.TYPE.toString(), + CHUNK, + Tag.FROM.toString(), + DISK); + metricService.remove( + MetricType.COUNTER, Metric.QUERY_DISK_READ.toString(), Tag.TYPE.toString(), CHUNK); + } + private void bindConstructChunkReader(AbstractMetricService metricService) { constructChunkReadersAlignedMemHistogram = metricService.getOrCreateHistogram( @@ -1249,8 +1468,10 @@ private void unbindBuildTsBlockFromMergeReader(AbstractMetricService metricServi @Override public void bindTo(AbstractMetricService metricService) { + bindBloomFilter(metricService); bindTimeseriesMetadata(metricService); bindAlignedTimeseriesMetadata(metricService); + bindTimeSeriesMetadataCache(metricService); bindReadTimeseriesMetadata(metricService); bindTimeseriesMetadataModification(metricService); bindLoadChunkMetadataList(metricService); @@ -1258,6 +1479,7 @@ public void bindTo(AbstractMetricService metricService) { bindChunkMetadataFilter(metricService); bindConstructChunkReader(metricService); bindReadChunk(metricService); + bindChunk(metricService); bindInitChunkReader(metricService); bindTsBlockFromPageReader(metricService); bindBuildTsBlockFromMergeReader(metricService); @@ -1265,7 +1487,9 @@ public void bindTo(AbstractMetricService metricService) { @Override public void unbindFrom(AbstractMetricService metricService) { + unbindBloomFilter(metricService); unbindTimeseriesMetadata(metricService); + unbindTimeSeriesMetadataCache(metricService); unbindReadTimeseriesMetadata(metricService); unbindTimeseriesMetadataModification(metricService); unbindLoadChunkMetadataList(metricService); @@ -1273,6 +1497,7 @@ public void unbindFrom(AbstractMetricService metricService) { unbindChunkMetadataFilter(metricService); unbindConstructChunkReader(metricService); unbindReadChunk(metricService); + unbindChunk(metricService); unbindInitChunkReader(metricService); unbindTsBlockFromPageReader(metricService); unbindBuildTsBlockFromMergeReader(metricService); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/statistics/FragmentInstanceStatisticsDrawer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/statistics/FragmentInstanceStatisticsDrawer.java index bb40301506ac..0141a86e627b 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/statistics/FragmentInstanceStatisticsDrawer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/statistics/FragmentInstanceStatisticsDrawer.java @@ -152,6 +152,11 @@ private void addLineWithValueCheck( } } + private void addLineWithoutValueCheck( + List singleFragmentInstanceArea, int level, String valueName, long value) { + addLine(singleFragmentInstanceArea, level, valueName + String.format(": %s", value)); + } + private void addLineWithValueCheck( List singleFragmentInstanceArea, int level, String valueName, double value) { if (Math.abs(value) > EPSILON) { @@ -159,6 +164,11 @@ private void addLineWithValueCheck( } } + private void addLineWithoutValueCheck( + List singleFragmentInstanceArea, int level, String valueName, double value) { + addLine(singleFragmentInstanceArea, level, valueName + String.format(": %.3f", value)); + } + private void addBlankLine(List singleFragmentInstanceArea) { 
addLine(singleFragmentInstanceArea, 0, " "); } @@ -167,6 +177,27 @@ private void renderQueryStatistics( TQueryStatistics queryStatistics, List singleFragmentInstanceArea) { addLine(singleFragmentInstanceArea, 1, "Query Statistics:"); + addLineWithoutValueCheck( + singleFragmentInstanceArea, + 2, + "loadBloomFilterFromCacheCount", + queryStatistics.loadBloomFilterFromCacheCount); + addLineWithoutValueCheck( + singleFragmentInstanceArea, + 2, + "loadBloomFilterFromDiskCount", + queryStatistics.loadBloomFilterFromDiskCount); + addLineWithoutValueCheck( + singleFragmentInstanceArea, + 2, + "loadBloomFilterActualIOSize", + queryStatistics.loadBloomFilterActualIOSize); + addLineWithoutValueCheck( + singleFragmentInstanceArea, + 2, + "loadBloomFilterTime", + queryStatistics.loadBloomFilterTime * NS_TO_MS_FACTOR); + addLineWithValueCheck( singleFragmentInstanceArea, 2, @@ -248,6 +279,23 @@ private void renderQueryStatistics( 2, "loadTimeSeriesMetadataAlignedMemUnSeqTime", queryStatistics.loadTimeSeriesMetadataAlignedMemUnSeqTime * NS_TO_MS_FACTOR); + + addLineWithoutValueCheck( + singleFragmentInstanceArea, + 2, + "loadTimeSeriesMetadataFromCacheCount", + queryStatistics.loadTimeSeriesMetadataFromCacheCount); + addLineWithoutValueCheck( + singleFragmentInstanceArea, + 2, + "loadTimeSeriesMetadataFromDiskCount", + queryStatistics.loadTimeSeriesMetadataFromDiskCount); + addLineWithoutValueCheck( + singleFragmentInstanceArea, + 2, + "loadTimeSeriesMetadataActualIOSize", + queryStatistics.loadTimeSeriesMetadataActualIOSize); + addLineWithValueCheck( singleFragmentInstanceArea, 2, @@ -311,6 +359,22 @@ private void renderQueryStatistics( "constructAlignedChunkReadersMemTime", queryStatistics.constructAlignedChunkReadersMemTime * NS_TO_MS_FACTOR); + addLineWithoutValueCheck( + singleFragmentInstanceArea, + 2, + "loadChunkFromCacheCount", + queryStatistics.loadChunkFromCacheCount); + addLineWithoutValueCheck( + singleFragmentInstanceArea, + 2, + "loadChunkFromDiskCount", + queryStatistics.loadChunkFromDiskCount); + addLineWithoutValueCheck( + singleFragmentInstanceArea, + 2, + "loadChunkActualIOSize", + queryStatistics.loadChunkActualIOSize); + addLineWithValueCheck( singleFragmentInstanceArea, 2, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/buffer/BloomFilterCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/buffer/BloomFilterCache.java index ca2f3479908f..5166015d1bca 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/buffer/BloomFilterCache.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/buffer/BloomFilterCache.java @@ -19,13 +19,14 @@ package org.apache.iotdb.db.storageengine.buffer; +import org.apache.iotdb.commons.exception.IoTDBIORuntimeException; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.storageengine.dataregion.read.control.FileReaderManager; +import com.github.benmanes.caffeine.cache.Cache; import com.github.benmanes.caffeine.cache.Caffeine; -import com.github.benmanes.caffeine.cache.LoadingCache; import com.github.benmanes.caffeine.cache.Weigher; import org.apache.tsfile.read.TsFileSequenceReader; import org.apache.tsfile.utils.BloomFilter; @@ -36,6 +37,8 @@ import java.io.IOException; import java.util.Objects; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; +import java.util.function.LongConsumer; /** This 
class is used to cache BloomFilter in IoTDB. The caching strategy is LRU. */ @SuppressWarnings("squid:S6548") @@ -49,7 +52,7 @@ public class BloomFilterCache { private static final boolean CACHE_ENABLE = CONFIG.isMetaDataCacheEnable(); private final AtomicLong entryAverageSize = new AtomicLong(0); - private final LoadingCache lruCache; + private final Cache lruCache; private BloomFilterCache() { if (CACHE_ENABLE) { @@ -63,35 +66,48 @@ private BloomFilterCache() { (key, bloomFilter) -> (int) (key.getRetainedSizeInBytes() + bloomFilter.getRetainedSizeInBytes())) .recordStats() - .build( - key -> { - TsFileSequenceReader reader = - FileReaderManager.getInstance().get(key.filePath, true); - return reader.readBloomFilter(); - }); + .build(); } public static BloomFilterCache getInstance() { return BloomFilterCacheHolder.INSTANCE; } + @TestOnly public BloomFilter get(BloomFilterCacheKey key) throws IOException { - return get(key, false); + LongConsumer emptyConsumer = l -> {}; + return get(key, false, emptyConsumer, emptyConsumer, emptyConsumer); } - public BloomFilter get(BloomFilterCacheKey key, boolean debug) throws IOException { - if (!CACHE_ENABLE) { - TsFileSequenceReader reader = FileReaderManager.getInstance().get(key.filePath, true); - return reader.readBloomFilter(); - } + public BloomFilter get( + BloomFilterCacheKey key, + boolean debug, + LongConsumer ioSizeRecorder, + LongConsumer cacheHitAdder, + LongConsumer cacheMissAdder) + throws IOException { + BloomFilterLoader loader = new BloomFilterLoader(ioSizeRecorder); + try { + if (!CACHE_ENABLE) { + return loader.apply(key); + } - BloomFilter bloomFilter = lruCache.get(key); + BloomFilter bloomFilter = lruCache.get(key, loader); - if (debug) { - DEBUG_LOGGER.info("get bloomFilter from cache where filePath is: {}", key.filePath); - } + if (debug) { + DEBUG_LOGGER.info("get bloomFilter from cache where filePath is: {}", key.filePath); + } - return bloomFilter; + return bloomFilter; + } catch (IoTDBIORuntimeException e) { + throw e.getCause(); + } finally { + if (loader.isCacheMiss()) { + cacheMissAdder.accept(1); + } else { + cacheHitAdder.accept(1); + } + } } public double calculateBloomFilterHitRatio() { @@ -183,6 +199,32 @@ public long getRetainedSizeInBytes() { } } + private static class BloomFilterLoader implements Function { + + private boolean cacheMiss = false; + private final LongConsumer ioSizeRecorder; + + private BloomFilterLoader(LongConsumer ioSizeRecorder) { + this.ioSizeRecorder = ioSizeRecorder; + } + + @Override + public BloomFilter apply(BloomFilterCacheKey bloomFilterCacheKey) { + try { + cacheMiss = true; + TsFileSequenceReader reader = + FileReaderManager.getInstance().get(bloomFilterCacheKey.filePath, true, ioSizeRecorder); + return reader.readBloomFilter(ioSizeRecorder); + } catch (IOException e) { + throw new IoTDBIORuntimeException(e); + } + } + + public boolean isCacheMiss() { + return cacheMiss; + } + } + /** singleton pattern. 
*/ private static class BloomFilterCacheHolder { private static final BloomFilterCache INSTANCE = new BloomFilterCache(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/buffer/ChunkCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/buffer/ChunkCache.java index 3ad81d399309..62a0189a4b98 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/buffer/ChunkCache.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/buffer/ChunkCache.java @@ -19,17 +19,19 @@ package org.apache.iotdb.db.storageengine.buffer; +import org.apache.iotdb.commons.exception.IoTDBIORuntimeException; import org.apache.iotdb.commons.service.metric.MetricService; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; +import org.apache.iotdb.db.queryengine.execution.fragment.QueryContext; import org.apache.iotdb.db.queryengine.metric.ChunkCacheMetrics; import org.apache.iotdb.db.queryengine.metric.SeriesScanCostMetricSet; import org.apache.iotdb.db.storageengine.dataregion.read.control.FileReaderManager; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileID; +import com.github.benmanes.caffeine.cache.Cache; import com.github.benmanes.caffeine.cache.Caffeine; -import com.github.benmanes.caffeine.cache.LoadingCache; import com.github.benmanes.caffeine.cache.Weigher; import org.apache.tsfile.file.metadata.statistics.Statistics; import org.apache.tsfile.read.TsFileSequenceReader; @@ -42,6 +44,8 @@ import java.io.IOException; import java.util.List; import java.util.Objects; +import java.util.function.Function; +import java.util.function.LongConsumer; import static org.apache.iotdb.db.queryengine.metric.SeriesScanCostMetricSet.READ_CHUNK_ALL; import static org.apache.iotdb.db.queryengine.metric.SeriesScanCostMetricSet.READ_CHUNK_FILE; @@ -64,7 +68,7 @@ public class ChunkCache { SeriesScanCostMetricSet.getInstance(); // to save memory footprint, we don't save measurementId in ChunkHeader of Chunk - private final LoadingCache lruCache; + private final Cache lruCache; private ChunkCache() { if (CACHE_ENABLE) { @@ -78,21 +82,7 @@ private ChunkCache() { (key, chunk) -> (int) (key.getRetainedSizeInBytes() + chunk.getRetainedSizeInBytes())) .recordStats() - .build( - key -> { - long startTime = System.nanoTime(); - try { - TsFileSequenceReader reader = - FileReaderManager.getInstance().get(key.getFilePath(), key.closed); - Chunk chunk = reader.readMemChunk(key.offsetOfChunkHeader); - // to save memory footprint, we don't save measurementId in ChunkHeader of Chunk - chunk.getHeader().setMeasurementID(null); - return chunk; - } finally { - SERIES_SCAN_COST_METRIC_SET.recordSeriesScanCost( - READ_CHUNK_FILE, System.nanoTime() - startTime); - } - }); + .build(); // add metrics MetricService.getInstance().addMetricSet(new ChunkCacheMetrics(this)); @@ -106,36 +96,85 @@ public static ChunkCache getInstance() { return ChunkCacheHolder.INSTANCE; } + @TestOnly public Chunk get( + ChunkCacheKey chunkCacheKey, List timeRangeList, Statistics chunkStatistic) + throws IOException { + LongConsumer emptyConsumer = l -> {}; + return get( + chunkCacheKey, + timeRangeList, + chunkStatistic, + false, + emptyConsumer, + emptyConsumer, + emptyConsumer); + } + + public Chunk get( + ChunkCacheKey chunkCacheKey, + List timeRangeList, + Statistics chunkStatistic, + QueryContext queryContext) + throws IOException { + LongConsumer ioSizeRecorder = + 
queryContext.getQueryStatistics().getLoadChunkActualIOSize()::addAndGet; + LongConsumer cacheHitAdder = + queryContext.getQueryStatistics().getLoadChunkFromCacheCount()::addAndGet; + LongConsumer cacheMissAdder = + queryContext.getQueryStatistics().getLoadChunkFromDiskCount()::addAndGet; + return get( + chunkCacheKey, + timeRangeList, + chunkStatistic, + queryContext.isDebug(), + ioSizeRecorder, + cacheHitAdder, + cacheMissAdder); + } + + private Chunk get( ChunkCacheKey chunkCacheKey, List timeRangeList, Statistics chunkStatistic, - boolean debug) + boolean debug, + LongConsumer ioSizeRecorder, + LongConsumer cacheHitAdder, + LongConsumer cacheMissAdder) throws IOException { long startTime = System.nanoTime(); + ChunkLoader chunkLoader = new ChunkLoader(ioSizeRecorder); try { if (!CACHE_ENABLE) { - TsFileSequenceReader reader = - FileReaderManager.getInstance().get(chunkCacheKey.getFilePath(), true); - Chunk chunk = reader.readMemChunk(chunkCacheKey.offsetOfChunkHeader); - return new Chunk( - chunk.getHeader(), chunk.getData().duplicate(), timeRangeList, chunkStatistic); + Chunk chunk = chunkLoader.apply(chunkCacheKey); + return constructChunk(chunk, timeRangeList, chunkStatistic); } - Chunk chunk = lruCache.get(chunkCacheKey); + Chunk chunk = lruCache.get(chunkCacheKey, chunkLoader); if (debug) { DEBUG_LOGGER.info("get chunk from cache whose key is: {}", chunkCacheKey); } - return new Chunk( - chunk.getHeader(), chunk.getData().duplicate(), timeRangeList, chunkStatistic); + return constructChunk(chunk, timeRangeList, chunkStatistic); + } catch (IoTDBIORuntimeException e) { + throw e.getCause(); } finally { + if (chunkLoader.isCacheMiss()) { + cacheMissAdder.accept(1); + } else { + cacheHitAdder.accept(1); + } SERIES_SCAN_COST_METRIC_SET.recordSeriesScanCost( READ_CHUNK_ALL, System.nanoTime() - startTime); } } + private Chunk constructChunk( + Chunk chunk, List timeRangeList, Statistics chunkStatistic) { + return new Chunk(chunk.getHeader(), chunk.getData().duplicate(), timeRangeList, chunkStatistic); + } + public double calculateChunkHitRatio() { return lruCache.stats().hitRate(); } @@ -245,6 +284,40 @@ public String toString() { } } + private static class ChunkLoader implements Function { + + private boolean cacheMiss = false; + private final LongConsumer ioSizeRecorder; + + private ChunkLoader(LongConsumer ioSizeRecorder) { + this.ioSizeRecorder = ioSizeRecorder; + } + + @Override + public Chunk apply(ChunkCacheKey key) { + + long startTime = System.nanoTime(); + try { + cacheMiss = true; + TsFileSequenceReader reader = + FileReaderManager.getInstance().get(key.getFilePath(), key.closed, ioSizeRecorder); + Chunk chunk = reader.readMemChunk(key.offsetOfChunkHeader, ioSizeRecorder); + // to save memory footprint, we don't save measurementId in ChunkHeader of Chunk + chunk.getHeader().setMeasurementID(null); + return chunk; + } catch (IOException e) { + throw new IoTDBIORuntimeException(e); + } finally { + SERIES_SCAN_COST_METRIC_SET.recordSeriesScanCost( + READ_CHUNK_FILE, System.nanoTime() - startTime); + } + } + + public boolean isCacheMiss() { + return cacheMiss; + } + } + /** singleton pattern. 
*/ private static class ChunkCacheHolder { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/buffer/TimeSeriesMetadataCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/buffer/TimeSeriesMetadataCache.java index bf308993e6a7..f32d424ae0d1 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/buffer/TimeSeriesMetadataCache.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/buffer/TimeSeriesMetadataCache.java @@ -24,6 +24,7 @@ import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; +import org.apache.iotdb.db.queryengine.execution.fragment.QueryContext; import org.apache.iotdb.db.queryengine.metric.SeriesScanCostMetricSet; import org.apache.iotdb.db.queryengine.metric.TimeSeriesMetadataCacheMetrics; import org.apache.iotdb.db.storageengine.dataregion.read.control.FileReaderManager; @@ -51,6 +52,7 @@ import java.util.Set; import java.util.WeakHashMap; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.LongConsumer; import static org.apache.iotdb.db.queryengine.metric.SeriesScanCostMetricSet.READ_TIMESERIES_METADATA_CACHE; import static org.apache.iotdb.db.queryengine.metric.SeriesScanCostMetricSet.READ_TIMESERIES_METADATA_FILE; @@ -109,26 +111,38 @@ public TimeseriesMetadata get( TimeSeriesMetadataCacheKey key, Set allSensors, boolean ignoreNotExists, - boolean debug) + boolean debug, + QueryContext queryContext) throws IOException { long startTime = System.nanoTime(); + long loadBloomFilterTime = 0; + LongConsumer timeSeriesMetadataIoSizeRecorder = + queryContext.getQueryStatistics().getLoadTimeSeriesMetadataActualIOSize()::addAndGet; + LongConsumer bloomFilterIoSizeRecorder = + queryContext.getQueryStatistics().getLoadBloomFilterActualIOSize()::addAndGet; boolean cacheHit = true; try { if (!CACHE_ENABLE) { cacheHit = false; // bloom filter part - TsFileSequenceReader reader = FileReaderManager.getInstance().get(filePath, true); - BloomFilter bloomFilter = reader.readBloomFilter(); + TsFileSequenceReader reader = + FileReaderManager.getInstance().get(filePath, true, bloomFilterIoSizeRecorder); + BloomFilter bloomFilter = reader.readBloomFilter(bloomFilterIoSizeRecorder); + queryContext.getQueryStatistics().getLoadBloomFilterFromDiskCount().incrementAndGet(); if (bloomFilter != null && !bloomFilter.contains( ((PlainDeviceID) key.device).toStringID() + IoTDBConstant.PATH_SEPARATOR + key.measurement)) { + loadBloomFilterTime = System.nanoTime() - startTime; return null; } + loadBloomFilterTime = System.nanoTime() - startTime; + TimeseriesMetadata timeseriesMetadata = - reader.readTimeseriesMetadata(key.device, key.measurement, ignoreNotExists); + reader.readTimeseriesMetadata( + key.device, key.measurement, ignoreNotExists, timeSeriesMetadataIoSizeRecorder); return (timeseriesMetadata == null || timeseriesMetadata.getStatistics().getCount() == 0) ? 
null : timeseriesMetadata; @@ -151,6 +165,7 @@ public TimeseriesMetadata get( if (timeseriesMetadata == null) { cacheHit = false; + long loadBloomFilterStartTime = System.nanoTime(); // bloom filter part BloomFilter bloomFilter = BloomFilterCache.getInstance() @@ -161,7 +176,12 @@ public TimeseriesMetadata get( key.timePartitionId, key.tsFileVersion, key.compactionVersion), - debug); + debug, + bloomFilterIoSizeRecorder, + queryContext.getQueryStatistics().getLoadBloomFilterFromCacheCount() + ::addAndGet, + queryContext.getQueryStatistics().getLoadBloomFilterFromDiskCount() + ::addAndGet); if (bloomFilter != null && !bloomFilter.contains( ((PlainDeviceID) key.device).toStringID() @@ -170,11 +190,21 @@ public TimeseriesMetadata get( if (debug) { DEBUG_LOGGER.info("TimeSeries meta data {} is filter by bloomFilter!", key); } + loadBloomFilterTime = System.nanoTime() - loadBloomFilterStartTime; return null; } - TsFileSequenceReader reader = FileReaderManager.getInstance().get(filePath, true); + + loadBloomFilterTime = System.nanoTime() - loadBloomFilterStartTime; + TsFileSequenceReader reader = + FileReaderManager.getInstance() + .get(filePath, true, timeSeriesMetadataIoSizeRecorder); List timeSeriesMetadataList = - reader.readTimeseriesMetadata(key.device, key.measurement, allSensors); + reader.readTimeseriesMetadata( + key.device, + key.measurement, + allSensors, + ignoreNotExists, + timeSeriesMetadataIoSizeRecorder); // put TimeSeriesMetadata of all sensors used in this read into cache for (TimeseriesMetadata metadata : timeSeriesMetadataList) { TimeSeriesMetadataCacheKey k = @@ -212,9 +242,24 @@ public TimeseriesMetadata get( return new TimeseriesMetadata(timeseriesMetadata); } } finally { - SERIES_SCAN_COST_METRIC_SET.recordSeriesScanCost( - cacheHit ? 
READ_TIMESERIES_METADATA_CACHE : READ_TIMESERIES_METADATA_FILE, - System.nanoTime() - startTime); + queryContext.getQueryStatistics().getLoadBloomFilterTime().getAndAdd(loadBloomFilterTime); + if (cacheHit) { + queryContext + .getQueryStatistics() + .getLoadTimeSeriesMetadataFromCacheCount() + .incrementAndGet(); + // in metric panel, loading BloomFilter time is included in loading TimeSeriesMetadata + SERIES_SCAN_COST_METRIC_SET.recordSeriesScanCost( + READ_TIMESERIES_METADATA_CACHE, System.nanoTime() - startTime); + } else { + queryContext + .getQueryStatistics() + .getLoadTimeSeriesMetadataFromDiskCount() + .incrementAndGet(); + // in metric panel, loading BloomFilter time is included in loading TimeSeriesMetadata + SERIES_SCAN_COST_METRIC_SET.recordSeriesScanCost( + READ_TIMESERIES_METADATA_FILE, System.nanoTime() - startTime); + } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/control/FileReaderManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/control/FileReaderManager.java index b2e2d2f669d9..a814a1e148bc 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/control/FileReaderManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/control/FileReaderManager.java @@ -33,6 +33,7 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.LongConsumer; /** * {@link FileReaderManager} is a singleton, which is used to manage all file readers(opened file @@ -114,6 +115,23 @@ public synchronized void closeFileAndRemoveReader(String filePath) throws IOExce @SuppressWarnings("squid:S2095") public synchronized TsFileSequenceReader get(String filePath, boolean isClosed) throws IOException { + return get(filePath, isClosed, null); + } + + /** + * Get the reader of the file(tsfile or unseq tsfile) indicated by filePath. If the reader already + * exists, just get it from closedFileReaderMap or unclosedFileReaderMap depending on isClosing . + * Otherwise a new reader will be created and cached. + * + * @param filePath the path of the file, of which the reader is desired. + * @param isClosed whether the corresponding file still receives insertions or not. + * @param ioSizeRecorder can be null + * @return the reader of the file specified by filePath. + * @throws IOException when reader cannot be created. + */ + @SuppressWarnings("squid:S2095") + public synchronized TsFileSequenceReader get( + String filePath, boolean isClosed, LongConsumer ioSizeRecorder) throws IOException { Map readerMap = !isClosed ? 
unclosedFileReaderMap : closedFileReaderMap; @@ -127,9 +145,9 @@ public synchronized TsFileSequenceReader get(String filePath, boolean isClosed) TsFileSequenceReader tsFileReader = null; // check if the file is old version if (!isClosed) { - tsFileReader = new UnClosedTsFileReader(filePath); + tsFileReader = new UnClosedTsFileReader(filePath, ioSizeRecorder); } else { - tsFileReader = new TsFileSequenceReader(filePath); + tsFileReader = new TsFileSequenceReader(filePath, ioSizeRecorder); byte versionNumber = tsFileReader.readVersionNumber(); if (versionNumber != TSFileConfig.VERSION_NUMBER) { tsFileReader.close(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/reader/chunk/DiskAlignedChunkLoader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/reader/chunk/DiskAlignedChunkLoader.java index 874955489138..070bb496b8d5 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/reader/chunk/DiskAlignedChunkLoader.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/reader/chunk/DiskAlignedChunkLoader.java @@ -43,7 +43,6 @@ public class DiskAlignedChunkLoader implements IChunkLoader { private final QueryContext context; - private final boolean debug; private final TsFileResource resource; @@ -52,7 +51,6 @@ public class DiskAlignedChunkLoader implements IChunkLoader { public DiskAlignedChunkLoader(QueryContext context, TsFileResource resource) { this.context = context; - this.debug = context.isDebug(); this.resource = resource; } @@ -83,7 +81,7 @@ public IChunkReader getChunkReader(IChunkMetadata chunkMetaData, Filter globalTi resource.isClosed()), timeChunkMetadata.getDeleteIntervalList(), timeChunkMetadata.getStatistics(), - debug); + context); List valueChunkList = new ArrayList<>(); for (IChunkMetadata valueChunkMetadata : alignedChunkMetadata.getValueChunkMetadataList()) { valueChunkList.add( @@ -98,7 +96,7 @@ public IChunkReader getChunkReader(IChunkMetadata chunkMetaData, Filter globalTi resource.isClosed()), valueChunkMetadata.getDeleteIntervalList(), valueChunkMetadata.getStatistics(), - debug)); + context)); } long t2 = System.nanoTime(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/reader/chunk/DiskChunkLoader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/reader/chunk/DiskChunkLoader.java index a956f4ef94f5..be33428ae65f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/reader/chunk/DiskChunkLoader.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/read/reader/chunk/DiskChunkLoader.java @@ -44,11 +44,8 @@ public class DiskChunkLoader implements IChunkLoader { private final TsFileResource resource; - private final boolean debug; - public DiskChunkLoader(QueryContext context, TsFileResource resource) { this.context = context; - this.debug = context.isDebug(); this.resource = resource; } @@ -63,7 +60,7 @@ public Chunk loadChunk(ChunkMetadata chunkMetaData) throws IOException { resource.isClosed()), chunkMetaData.getDeleteIntervalList(), chunkMetaData.getStatistics(), - debug); + context); } @Override @@ -86,7 +83,7 @@ public IChunkReader getChunkReader(IChunkMetadata chunkMetaData, Filter globalTi resource.isClosed()), chunkMetaData.getDeleteIntervalList(), chunkMetaData.getStatistics(), - debug); + context); long t2 = System.nanoTime(); 
IChunkReader chunkReader = new ChunkReader(chunk, globalTimeFilter); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/buffer/ChunkCacheTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/buffer/ChunkCacheTest.java index 63d176a6b496..2713df3b38fc 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/buffer/ChunkCacheTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/buffer/ChunkCacheTest.java @@ -111,8 +111,7 @@ public void testChunkCache() throws IOException { firstChunkMetadata.getOffsetOfChunkHeader(), true), firstChunkMetadata.getDeleteIntervalList(), - firstChunkMetadata.getStatistics(), - false); + firstChunkMetadata.getStatistics()); ChunkMetadata chunkMetadataKey = new ChunkMetadata("sensor0", TSDataType.DOUBLE, 25, new DoubleStatistics()); @@ -127,8 +126,7 @@ public void testChunkCache() throws IOException { chunkMetadataKey.getOffsetOfChunkHeader(), true), chunkMetadataKey.getDeleteIntervalList(), - chunkMetadataKey.getStatistics(), - false); + chunkMetadataKey.getStatistics()); Assert.assertEquals(chunk1.getHeader(), chunk2.getHeader()); Assert.assertEquals(chunk1.getData(), chunk2.getData()); } diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/TestUtilsForAlignedSeries.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/TestUtilsForAlignedSeries.java index 1790740068c4..c983270f3a04 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/TestUtilsForAlignedSeries.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/compaction/TestUtilsForAlignedSeries.java @@ -58,8 +58,10 @@ public static void writeTsFile( } else { writeNotAlignedChunkGroup(writer, device, schemas, startTime, endTime, randomNull[i]); } - tsFileResource.updateStartTime(new PlainDeviceID(devices[i]), startTime); - tsFileResource.updateEndTime(new PlainDeviceID(devices[i]), endTime); + if (endTime > startTime) { + tsFileResource.updateStartTime(new PlainDeviceID(devices[i]), startTime); + tsFileResource.updateEndTime(new PlainDeviceID(devices[i]), endTime); + } } writer.endFile(); } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/exception/IoTDBIORuntimeException.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/exception/IoTDBIORuntimeException.java new file mode 100644 index 000000000000..6934febeac9c --- /dev/null +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/exception/IoTDBIORuntimeException.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.commons.exception; + +import java.io.IOException; + +public class IoTDBIORuntimeException extends RuntimeException { + public IoTDBIORuntimeException(IOException cause) { + super(cause); + } + + @Override + public synchronized IOException getCause() { + return (IOException) super.getCause(); + } +} diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java index 8aa0c7245227..5865aeec6def 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java @@ -107,6 +107,7 @@ public enum Metric { SERIES_SCAN_COST("series_scan_cost"), MEMORY_USAGE_MONITOR("memory_usage_monitor"), METRIC_LOAD_TIME_SERIES_METADATA("metric_load_time_series_metadata"), + METRIC_QUERY_CACHE("metric_query_cache"), QUERY_METADATA_COST("query_metadata_cost"), DISPATCHER("dispatcher"), QUERY_EXECUTION("query_execution"), @@ -120,6 +121,8 @@ public enum Metric { FRAGMENT_INSTANCE_MANAGER("fragment_instance_manager"), MEMORY_POOL("memory_pool"), LOCAL_EXECUTION_PLANNER("local_execution_planner"), + QUERY_DISK_READ("query_disk_read"), + // file related FILE_SIZE("file_size"), FILE_COUNT("file_count"), diff --git a/iotdb-protocol/thrift-datanode/src/main/thrift/datanode.thrift b/iotdb-protocol/thrift-datanode/src/main/thrift/datanode.thrift index d8b9461cb4f5..387d60ac96f7 100644 --- a/iotdb-protocol/thrift-datanode/src/main/thrift/datanode.thrift +++ b/iotdb-protocol/thrift-datanode/src/main/thrift/datanode.thrift @@ -620,6 +620,19 @@ struct TQueryStatistics { 35: i64 alignedTimeSeriesMetadataModificationTime 36: i64 nonAlignedTimeSeriesMetadataModificationCount 37: i64 nonAlignedTimeSeriesMetadataModificationTime + + 38: i64 loadBloomFilterFromCacheCount + 39: i64 loadBloomFilterFromDiskCount + 40: i64 loadBloomFilterActualIOSize + 41: i64 loadBloomFilterTime + + 42: i64 loadTimeSeriesMetadataFromCacheCount + 43: i64 loadTimeSeriesMetadataFromDiskCount + 44: i64 loadTimeSeriesMetadataActualIOSize + + 45: i64 loadChunkFromCacheCount + 46: i64 loadChunkFromDiskCount + 47: i64 loadChunkActualIOSize } diff --git a/pom.xml b/pom.xml index 957dcf080e1d..4805167a85fb 100644 --- a/pom.xml +++ b/pom.xml @@ -166,7 +166,7 @@ 0.14.1 1.9 1.5.6-3 - 1.1.0-241108-SNAPSHOT + 1.1.1-8c45afe9-SNAPSHOT
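
Illustrative sketch (not part of the patch above) of the caching pattern this change applies in BloomFilterCache, ChunkCache, and TimeSeriesMetadataCache: a manual Caffeine cache combined with a Function-based loader that remembers whether it actually ran, so the caller can attribute each lookup to a cache hit or miss and report the bytes read from disk through a LongConsumer. It assumes Caffeine is on the classpath; java.io.UncheckedIOException and a plain file read are hypothetical stand-ins for IoTDBIORuntimeException and the TsFileSequenceReader calls used in the real code.

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.function.Function;
import java.util.function.LongConsumer;

public class TrackingCacheDemo {

  // Loader that flags a miss and reports the number of bytes it read.
  static final class FileLoader implements Function<String, byte[]> {
    private final LongConsumer ioSizeRecorder;
    private boolean cacheMiss = false;

    FileLoader(LongConsumer ioSizeRecorder) {
      this.ioSizeRecorder = ioSizeRecorder;
    }

    @Override
    public byte[] apply(String path) {
      cacheMiss = true; // only invoked when the key is absent from the cache
      try {
        byte[] data = Files.readAllBytes(Paths.get(path));
        ioSizeRecorder.accept(data.length); // actual IO size for this load
        return data;
      } catch (IOException e) {
        throw new UncheckedIOException(e); // unwrapped again by the caller
      }
    }

    boolean isCacheMiss() {
      return cacheMiss;
    }
  }

  private final Cache<String, byte[]> cache =
      Caffeine.newBuilder()
          .maximumWeight(64L * 1024 * 1024)
          .weigher((String key, byte[] value) -> value.length)
          .recordStats()
          .build();

  public byte[] get(
      String path,
      LongConsumer ioSizeRecorder,
      LongConsumer cacheHitAdder,
      LongConsumer cacheMissAdder)
      throws IOException {
    FileLoader loader = new FileLoader(ioSizeRecorder);
    try {
      return cache.get(path, loader);
    } catch (UncheckedIOException e) {
      throw e.getCause();
    } finally {
      if (loader.isCacheMiss()) {
        cacheMissAdder.accept(1);
      } else {
        cacheHitAdder.accept(1);
      }
    }
  }
}

A manual Cache with get(key, mappingFunction) is used instead of a LoadingCache so that per-query consumers can be supplied on every lookup, which is why the patch changes the field type from LoadingCache to Cache in BloomFilterCache and ChunkCache and passes the QueryContext-derived recorders into each call.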