Skip to content

Commit

Permalink
[To dev/1.3] Add cache hit situation and actual io size for BloomFilt…
Browse files Browse the repository at this point in the history
…er, TimeSeriesMetadata (including ChunkMetadataList) and Chunk (#14082)
  • Loading branch information
JackieTien97 authored Nov 14, 2024
1 parent be0c8ad commit 85fd7b5
Show file tree
Hide file tree
Showing 17 changed files with 681 additions and 84 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -654,6 +654,13 @@ public synchronized void releaseResource() {
long durationTime = System.currentTimeMillis() - executionStartTime.get();
QueryRelatedResourceMetricSet.getInstance().updateFragmentInstanceTime(durationTime);

SeriesScanCostMetricSet.getInstance()
.recordBloomFilterMetrics(
getQueryStatistics().getLoadBloomFilterFromCacheCount().get(),
getQueryStatistics().getLoadBloomFilterFromDiskCount().get(),
getQueryStatistics().getLoadBloomFilterActualIOSize().get(),
getQueryStatistics().getLoadBloomFilterTime().get());

SeriesScanCostMetricSet.getInstance()
.recordNonAlignedTimeSeriesMetadataCount(
getQueryStatistics().getLoadTimeSeriesMetadataDiskSeqCount().get(),
Expand All @@ -679,6 +686,12 @@ public synchronized void releaseResource() {
getQueryStatistics().getLoadTimeSeriesMetadataAlignedMemSeqTime().get(),
getQueryStatistics().getLoadTimeSeriesMetadataAlignedMemUnSeqTime().get());

SeriesScanCostMetricSet.getInstance()
.recordTimeSeriesMetadataMetrics(
getQueryStatistics().getLoadTimeSeriesMetadataFromCacheCount().get(),
getQueryStatistics().getLoadTimeSeriesMetadataFromDiskCount().get(),
getQueryStatistics().getLoadTimeSeriesMetadataActualIOSize().get());

SeriesScanCostMetricSet.getInstance()
.recordConstructChunkReadersCount(
getQueryStatistics().getConstructAlignedChunkReadersMemCount().get(),
Expand All @@ -692,6 +705,12 @@ public synchronized void releaseResource() {
getQueryStatistics().getConstructNonAlignedChunkReadersMemTime().get(),
getQueryStatistics().getConstructNonAlignedChunkReadersDiskTime().get());

SeriesScanCostMetricSet.getInstance()
.recordChunkMetrics(
getQueryStatistics().getLoadChunkFromCacheCount().get(),
getQueryStatistics().getLoadChunkFromDiskCount().get(),
getQueryStatistics().getLoadChunkActualIOSize().get());

SeriesScanCostMetricSet.getInstance()
.recordPageReadersDecompressCount(
getQueryStatistics().getPageReadersDecodeAlignedMemCount().get(),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,11 @@
*/
public class QueryStatistics {

// statistics for count, actual io size and time of loading bloom filter:
// cache-hit count vs. disk-read count, bytes actually read from disk
// (presumably bytes — TODO confirm unit), and cumulative load time
// (NOTE(review): time unit not visible here — confirm against the recording site)
private final AtomicLong loadBloomFilterFromCacheCount = new AtomicLong(0);
private final AtomicLong loadBloomFilterFromDiskCount = new AtomicLong(0);
private final AtomicLong loadBloomFilterActualIOSize = new AtomicLong(0);
private final AtomicLong loadBloomFilterTime = new AtomicLong(0);

// statistics for count and time of load timeseriesmetadata
private final AtomicLong loadTimeSeriesMetadataDiskSeqCount = new AtomicLong(0);
private final AtomicLong loadTimeSeriesMetadataDiskUnSeqCount = new AtomicLong(0);
Expand All @@ -48,6 +53,10 @@ public class QueryStatistics {
private final AtomicLong loadTimeSeriesMetadataAlignedMemSeqTime = new AtomicLong(0);
private final AtomicLong loadTimeSeriesMetadataAlignedMemUnSeqTime = new AtomicLong(0);

// statistics for cache hit situation and actual io size of loading timeseriesmetadata:
// cache-hit count vs. disk-read count, and bytes actually read from disk
// (presumably bytes — TODO confirm unit)
private final AtomicLong loadTimeSeriesMetadataFromCacheCount = new AtomicLong(0);
private final AtomicLong loadTimeSeriesMetadataFromDiskCount = new AtomicLong(0);
private final AtomicLong loadTimeSeriesMetadataActualIOSize = new AtomicLong(0);

// statistics for count and time of construct chunk readers(disk io and decompress)
private final AtomicLong constructNonAlignedChunkReadersDiskCount = new AtomicLong(0);
private final AtomicLong constructNonAlignedChunkReadersMemCount = new AtomicLong(0);
Expand All @@ -59,6 +68,10 @@ public class QueryStatistics {
private final AtomicLong constructAlignedChunkReadersDiskTime = new AtomicLong(0);
private final AtomicLong constructAlignedChunkReadersMemTime = new AtomicLong(0);

// statistics for cache hit situation and actual io size of loading chunk:
// cache-hit count vs. disk-read count, and bytes actually read from disk
// (presumably bytes — TODO confirm unit)
private final AtomicLong loadChunkFromCacheCount = new AtomicLong(0);
private final AtomicLong loadChunkFromDiskCount = new AtomicLong(0);
private final AtomicLong loadChunkActualIOSize = new AtomicLong(0);

// statistics for count and time of page decode
private final AtomicLong pageReadersDecodeAlignedDiskCount = new AtomicLong(0);
private final AtomicLong pageReadersDecodeAlignedDiskTime = new AtomicLong(0);
Expand Down Expand Up @@ -225,6 +238,46 @@ public AtomicLong getPageReaderMaxUsedMemorySize() {
return pageReaderMaxUsedMemorySize;
}

/** Returns the live {@code loadBloomFilterActualIOSize} accumulator. */
public AtomicLong getLoadBloomFilterActualIOSize() {
  return this.loadBloomFilterActualIOSize;
}

/** Returns the live {@code loadBloomFilterFromCacheCount} accumulator. */
public AtomicLong getLoadBloomFilterFromCacheCount() {
  return this.loadBloomFilterFromCacheCount;
}

/** Returns the live {@code loadBloomFilterFromDiskCount} accumulator. */
public AtomicLong getLoadBloomFilterFromDiskCount() {
  return this.loadBloomFilterFromDiskCount;
}

/** Returns the live {@code loadBloomFilterTime} accumulator. */
public AtomicLong getLoadBloomFilterTime() {
  return this.loadBloomFilterTime;
}

/** Returns the live {@code loadChunkActualIOSize} accumulator. */
public AtomicLong getLoadChunkActualIOSize() {
  return this.loadChunkActualIOSize;
}

/** Returns the live {@code loadChunkFromCacheCount} accumulator. */
public AtomicLong getLoadChunkFromCacheCount() {
  return this.loadChunkFromCacheCount;
}

/** Returns the live {@code loadChunkFromDiskCount} accumulator. */
public AtomicLong getLoadChunkFromDiskCount() {
  return this.loadChunkFromDiskCount;
}

/** Returns the live {@code loadTimeSeriesMetadataActualIOSize} accumulator. */
public AtomicLong getLoadTimeSeriesMetadataActualIOSize() {
  return this.loadTimeSeriesMetadataActualIOSize;
}

/** Returns the live {@code loadTimeSeriesMetadataFromCacheCount} accumulator. */
public AtomicLong getLoadTimeSeriesMetadataFromCacheCount() {
  return this.loadTimeSeriesMetadataFromCacheCount;
}

/** Returns the live {@code loadTimeSeriesMetadataFromDiskCount} accumulator. */
public AtomicLong getLoadTimeSeriesMetadataFromDiskCount() {
  return this.loadTimeSeriesMetadataFromDiskCount;
}

public TQueryStatistics toThrift() {
return new TQueryStatistics(
loadTimeSeriesMetadataDiskSeqCount.get(),
Expand Down Expand Up @@ -263,6 +316,16 @@ public TQueryStatistics toThrift() {
alignedTimeSeriesMetadataModificationCount.get(),
alignedTimeSeriesMetadataModificationTime.get(),
nonAlignedTimeSeriesMetadataModificationCount.get(),
nonAlignedTimeSeriesMetadataModificationTime.get());
nonAlignedTimeSeriesMetadataModificationTime.get(),
loadBloomFilterFromCacheCount.get(),
loadBloomFilterFromDiskCount.get(),
loadBloomFilterActualIOSize.get(),
loadBloomFilterTime.get(),
loadTimeSeriesMetadataFromCacheCount.get(),
loadTimeSeriesMetadataFromDiskCount.get(),
loadTimeSeriesMetadataActualIOSize.get(),
loadChunkFromCacheCount.get(),
loadChunkFromDiskCount.get(),
loadChunkActualIOSize.get());
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.metadata.MemAlignedChunkMetadataLoader;
import org.apache.iotdb.db.storageengine.dataregion.read.reader.chunk.metadata.MemChunkMetadataLoader;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.ITimeIndex;

import org.apache.tsfile.file.metadata.AlignedTimeSeriesMetadata;
import org.apache.tsfile.file.metadata.IChunkMetadata;
Expand Down Expand Up @@ -99,8 +100,9 @@ public static TimeseriesMetadata loadTimeSeriesMetadata(
new PlainDeviceID(seriesPath.getDevice()),
seriesPath.getMeasurement()),
allSensors,
resource.getTimeIndexType() != 1,
context.isDebug());
resource.getTimeIndexType() == ITimeIndex.FILE_TIME_INDEX_TYPE,
context.isDebug(),
context);
if (timeSeriesMetadata != null) {
long t2 = System.nanoTime();
List<Modification> pathModifications = context.getPathModifications(resource, seriesPath);
Expand Down Expand Up @@ -268,8 +270,9 @@ private static AlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFromDisk(
filePath,
new TimeSeriesMetadataCacheKey(resource.getTsFileID(), deviceId, ""),
allSensors,
resource.getTimeIndexType() != 1,
isDebug);
resource.getTimeIndexType() == ITimeIndex.FILE_TIME_INDEX_TYPE,
isDebug,
context);
if (timeColumn != null) {
// only need time column, like count_time aggregation
if (valueMeasurementList.isEmpty()) {
Expand All @@ -290,8 +293,9 @@ private static AlignedTimeSeriesMetadata loadAlignedTimeSeriesMetadataFromDisk(
new TimeSeriesMetadataCacheKey(
resource.getTsFileID(), deviceId, valueMeasurement),
allSensors,
resource.getTimeIndexType() != 1,
isDebug);
resource.getTimeIndexType() == ITimeIndex.FILE_TIME_INDEX_TYPE,
isDebug,
context);
exist = (exist || (valueColumn != null));
valueTimeSeriesMetadataList.add(valueColumn);
}
Expand Down
Loading

0 comments on commit 85fd7b5

Please sign in to comment.