Commit

Remove other usage of deprecated HoodieTimer constructor and remove redundant startTimer() calls
yihua committed Oct 22, 2022
1 parent dc85aaa commit 398401f
Showing 33 changed files with 75 additions and 83 deletions.
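
The change applied throughout this commit is mechanical; a minimal sketch of the before/after pattern is below. It assumes HoodieTimer from org.apache.hudi.common.util with the API visible in the diffs (the deprecated no-arg constructor, the static start() factory, startTimer(), and endTimer() returning elapsed milliseconds); the surrounding TimerUsageExample class and the doWork()/doMoreWork() methods are illustrative placeholders, not code from this commit.

import org.apache.hudi.common.util.HoodieTimer;

public class TimerUsageExample {

  public void timedPhases() {
    // Before this commit, call sites used the deprecated constructor or a redundant chained call:
    //   HoodieTimer timer = new HoodieTimer().startTimer();
    //   HoodieTimer timer = HoodieTimer.start().startTimer();
    // After: the static factory returns a timer that is already started, so no startTimer() is needed.
    HoodieTimer timer = HoodieTimer.start();

    doWork();
    System.out.println("Took " + timer.endTimer() + " ms");  // endTimer() returns the elapsed time in ms

    timer.startTimer();  // an existing timer can be restarted to time a second phase
    doMoreWork();
    System.out.println("Second phase took " + timer.endTimer() + " ms");
  }

  private void doWork() { }

  private void doMoreWork() { }
}

In short, HoodieTimer.start() already starts the timer, so a chained or following startTimer() call is redundant, and the no-arg constructor it replaces is deprecated.
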
@@ -18,8 +18,6 @@

package org.apache.hudi.cli.commands;

-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
import org.apache.hudi.cli.HoodieCLI;
import org.apache.hudi.cli.HoodiePrintHelper;
import org.apache.hudi.cli.TableHeader;
@@ -35,6 +33,9 @@
import org.apache.hudi.metadata.HoodieBackedTableMetadata;
import org.apache.hudi.metadata.HoodieTableMetadata;
import org.apache.hudi.metadata.SparkHoodieBackedTableMetadataWriter;

+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.spark.api.java.JavaSparkContext;
@@ -122,7 +123,7 @@ public String create(
HoodieCLI.fs.mkdirs(metadataPath);
}

-HoodieTimer timer = new HoodieTimer().startTimer();
+HoodieTimer timer = HoodieTimer.start();
HoodieWriteConfig writeConfig = getWriteConfig();
initJavaSparkContext(Option.of(master));
SparkHoodieBackedTableMetadataWriter.create(HoodieCLI.conf, writeConfig, new HoodieSparkEngineContext(jsc));
@@ -158,7 +159,7 @@ public String init(@ShellOption(value = "--sparkMaster", defaultValue = SparkUti
throw new RuntimeException("Metadata directory (" + metadataPath.toString() + ") does not exist.");
}

-HoodieTimer timer = new HoodieTimer().startTimer();
+HoodieTimer timer = HoodieTimer.start();
if (!readOnly) {
HoodieWriteConfig writeConfig = getWriteConfig();
initJavaSparkContext(Option.of(master));
@@ -206,7 +207,7 @@ public String listPartitions(
return "[ERROR] Metadata Table not enabled/initialized\n\n";
}

-HoodieTimer timer = new HoodieTimer().startTimer();
+HoodieTimer timer = HoodieTimer.start();
List<String> partitions = metadata.getAllPartitionPaths();
LOG.debug("Took " + timer.endTimer() + " ms");

@@ -239,7 +240,7 @@ public String listFiles(
partitionPath = new Path(HoodieCLI.basePath, partition);
}

-HoodieTimer timer = new HoodieTimer().startTimer();
+HoodieTimer timer = HoodieTimer.start();
FileStatus[] statuses = metaReader.getAllFilesInPartition(partitionPath);
LOG.debug("Took " + timer.endTimer() + " ms");

@@ -271,7 +272,7 @@ public String validateFiles(
HoodieBackedTableMetadata fsMetaReader = new HoodieBackedTableMetadata(
new HoodieLocalEngineContext(HoodieCLI.conf), fsConfig, HoodieCLI.basePath, "/tmp");

-HoodieTimer timer = new HoodieTimer().startTimer();
+HoodieTimer timer = HoodieTimer.start();
List<String> metadataPartitions = metadataReader.getAllPartitionPaths();
LOG.debug("Listing partitions Took " + timer.endTimer() + " ms");
List<String> fsPartitions = fsMetaReader.getAllPartitionPaths();
@@ -18,8 +18,6 @@

package org.apache.hudi.index;

-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
import org.apache.hudi.common.engine.HoodieEngineContext;
import org.apache.hudi.common.fs.FSUtils;
import org.apache.hudi.common.model.FileSlice;
@@ -35,6 +33,9 @@
import org.apache.hudi.io.storage.HoodieFileReader;
import org.apache.hudi.io.storage.HoodieFileReaderFactory;
import org.apache.hudi.table.HoodieTable;

+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

@@ -152,7 +153,7 @@ public static List<String> filterKeysFromFile(Path filePath, List<String> candid
try {
// Load all rowKeys from the file, to double-confirm
if (!candidateRecordKeys.isEmpty()) {
-HoodieTimer timer = HoodieTimer.start().startTimer();
+HoodieTimer timer = HoodieTimer.start();
HoodieFileReader fileReader = HoodieFileReaderFactory.getFileReader(configuration, filePath);
Set<String> fileRowKeys = fileReader.filterRowKeys(new TreeSet<>(candidateRecordKeys));
foundRecordKeys.addAll(fileRowKeys);
@@ -60,7 +60,7 @@ public HoodieKeyLookupHandle(HoodieWriteConfig config, HoodieTable<T, I, K, O> h

private BloomFilter getBloomFilter() {
BloomFilter bloomFilter = null;
-HoodieTimer timer = HoodieTimer.start().startTimer();
+HoodieTimer timer = HoodieTimer.start();
try {
if (config.getBloomIndexUseMetadata()
&& hoodieTable.getMetaClient().getTableConfig().getMetadataPartitions()
@@ -49,8 +49,8 @@

import java.io.IOException;
import java.util.Collections;
-import java.util.List;
import java.util.HashMap;
+import java.util.List;

import static org.apache.hudi.common.util.StringUtils.isNullOrEmpty;

@@ -124,7 +124,7 @@ protected HoodieWriteHandle(HoodieWriteConfig config, String instantTime, String
this.tableSchemaWithMetaFields = HoodieAvroUtils.addMetadataFields(tableSchema, config.allowOperationMetadataField());
this.writeSchema = overriddenSchema.orElseGet(() -> getWriteSchema(config));
this.writeSchemaWithMetaFields = HoodieAvroUtils.addMetadataFields(writeSchema, config.allowOperationMetadataField());
-this.timer = HoodieTimer.start().startTimer();
+this.timer = HoodieTimer.start();
this.writeStatus = (WriteStatus) ReflectionUtils.loadClass(config.getWriteStatusClassName(),
!hoodieTable.getIndex().isImplicitWithStorage(), config.getWriteStatusFailureFraction());
this.taskContextSupplier = taskContextSupplier;
@@ -382,7 +382,7 @@ public void initTableMetadata() {
protected <T extends SpecificRecordBase> void initializeIfNeeded(HoodieTableMetaClient dataMetaClient,
Option<T> actionMetadata,
Option<String> inflightInstantTimestamp) throws IOException {
-HoodieTimer timer = HoodieTimer.start().startTimer();
+HoodieTimer timer = HoodieTimer.start();

boolean exists = metadataTableExists(dataMetaClient, actionMetadata);

@@ -18,9 +18,6 @@

package org.apache.hudi.table.action.clean;

-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
import org.apache.hudi.avro.model.HoodieActionInstant;
import org.apache.hudi.avro.model.HoodieCleanMetadata;
import org.apache.hudi.avro.model.HoodieCleanerPlan;
@@ -43,6 +40,8 @@
import org.apache.hudi.table.HoodieTable;
import org.apache.hudi.table.action.BaseActionExecutor;

+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

@@ -199,7 +198,6 @@ private HoodieCleanMetadata runClean(HoodieTable<T, I, K, O> table, HoodieInstan
HoodieInstant inflightInstant = null;
try {
final HoodieTimer timer = HoodieTimer.start();
-timer.startTimer();
if (cleanInstant.isRequested()) {
inflightInstant = table.getActiveTimeline().transitionCleanRequestedToInflight(cleanInstant,
TimelineMetadataUtils.serializeCleanerPlan(cleanerPlan));
@@ -105,7 +105,6 @@ public RunIndexActionExecutor(HoodieEngineContext context, HoodieWriteConfig con
@Override
public Option<HoodieIndexCommitMetadata> execute() {
HoodieTimer indexTimer = HoodieTimer.start();
-indexTimer.startTimer();

HoodieInstant indexInstant = validateAndGetIndexInstant();
// read HoodieIndexPlan
@@ -69,7 +69,6 @@ public BaseRestoreActionExecutor(HoodieEngineContext context,
@Override
public HoodieRestoreMetadata execute() {
HoodieTimer restoreTimer = HoodieTimer.start();
-restoreTimer.startTimer();

Option<HoodieInstant> restoreInstant = table.getRestoreTimeline()
.filterInflightsAndRequested()
@@ -104,7 +104,7 @@ private HoodieRollbackMetadata runRollback(HoodieTable<T, I, K, O> table, Hoodie
? table.getActiveTimeline().transitionRollbackRequestedToInflight(rollbackInstant)
: rollbackInstant;

-HoodieTimer rollbackTimer = HoodieTimer.start().startTimer();
+HoodieTimer rollbackTimer = HoodieTimer.start();
List<HoodieRollbackStat> stats = doRollbackAndGetStats(rollbackPlan);
HoodieRollbackMetadata rollbackMetadata = TimelineMetadataUtils.convertRollbackMetadata(
instantTime,
@@ -62,7 +62,6 @@ public CopyOnWriteRollbackActionExecutor(HoodieEngineContext context,
@Override
protected List<HoodieRollbackStat> executeRollback(HoodieRollbackPlan hoodieRollbackPlan) {
HoodieTimer rollbackTimer = HoodieTimer.start();
-rollbackTimer.startTimer();

List<HoodieRollbackStat> stats = new ArrayList<>();
HoodieActiveTimeline activeTimeline = table.getActiveTimeline();
@@ -62,7 +62,6 @@ public MergeOnReadRollbackActionExecutor(HoodieEngineContext context,
@Override
protected List<HoodieRollbackStat> executeRollback(HoodieRollbackPlan hoodieRollbackPlan) {
HoodieTimer rollbackTimer = HoodieTimer.start();
-rollbackTimer.startTimer();

LOG.info("Rolling back instant " + instantToRollback);

@@ -156,7 +156,7 @@ protected Option<Path> create(String partitionPath, String dataFileName, IOType
}

private Option<Path> create(Path markerPath, boolean checkIfExists) {
-HoodieTimer timer = HoodieTimer.start().startTimer();
+HoodieTimer timer = HoodieTimer.start();
Path dirPath = markerPath.getParent();
try {
if (!fs.exists(dirPath)) {
@@ -129,7 +129,7 @@ public Set<String> allMarkerFilePaths() {

@Override
protected Option<Path> create(String partitionPath, String dataFileName, IOType type, boolean checkIfExists) {
-HoodieTimer timer = HoodieTimer.start().startTimer();
+HoodieTimer timer = HoodieTimer.start();
String markerFileName = getMarkerFileName(dataFileName, type);

Map<String, String> paramsMap = new HashMap<>();
@@ -18,12 +18,6 @@

package org.apache.hudi.table.action.commit;

-import java.time.Duration;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import org.apache.hadoop.fs.Path;
import org.apache.hudi.avro.model.HoodieRequestedReplaceMetadata;
import org.apache.hudi.client.WriteStatus;
import org.apache.hudi.common.data.HoodieData;
@@ -42,6 +36,14 @@
import org.apache.hudi.table.WorkloadStat;
import org.apache.hudi.table.action.HoodieWriteMetadata;

+import org.apache.hadoop.fs.Path;
+
+import java.time.Duration;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
import static org.apache.hudi.common.table.timeline.HoodieInstant.State.REQUESTED;
import static org.apache.hudi.common.table.timeline.HoodieTimeline.REPLACE_COMMIT_ACTION;

@@ -59,7 +61,7 @@ public SparkDeletePartitionCommitActionExecutor(HoodieEngineContext context,
@Override
public HoodieWriteMetadata<HoodieData<WriteStatus>> execute() {
try {
-HoodieTimer timer = HoodieTimer.start().startTimer();
+HoodieTimer timer = HoodieTimer.start();
context.setJobStatus(this.getClass().getSimpleName(), "Gather all file ids from all deleting partitions.");
Map<String, List<String>> partitionToReplaceFileIds =
HoodieJavaPairRDD.getJavaPairRDD(context.parallelize(partitions).distinct()
@@ -2400,7 +2400,7 @@ private void validateMetadata(SparkRDDWriteClient testClient) throws IOException
return;
}

-HoodieTimer timer = HoodieTimer.start().startTimer();
+HoodieTimer timer = HoodieTimer.start();
HoodieSparkEngineContext engineContext = new HoodieSparkEngineContext(jsc);

// Partitions should match
@@ -17,13 +17,6 @@

package org.apache.hudi.testutils;

-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hudi.HoodieConversionUtils;
import org.apache.hudi.avro.model.HoodieActionInstant;
import org.apache.hudi.avro.model.HoodieCleanMetadata;
@@ -75,6 +68,14 @@
import org.apache.hudi.table.WorkloadStat;
import org.apache.hudi.timeline.service.TimelineService;
import org.apache.hudi.util.JFunction;

+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaRDD;
@@ -86,7 +87,6 @@
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInfo;
-import scala.Tuple2;

import java.io.IOException;
import java.io.Serializable;
@@ -105,6 +105,8 @@
import java.util.function.Function;
import java.util.stream.Collectors;

+import scala.Tuple2;
+
import static org.apache.hudi.common.util.CleanerUtils.convertCleanMetadata;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -567,7 +569,7 @@ public void validateMetadata(HoodieTestTable testTable, List<String> inflightCom
}
assertEquals(inflightCommits, testTable.inflightCommits());

-HoodieTimer timer = HoodieTimer.start().startTimer();
+HoodieTimer timer = HoodieTimer.start();
HoodieSparkEngineContext engineContext = new HoodieSparkEngineContext(jsc);

// Partitions should match
@@ -24,6 +24,7 @@
import org.apache.hudi.common.util.Option;
import org.apache.hudi.exception.HoodieException;
import org.apache.hudi.exception.HoodieIOException;
+import org.apache.hudi.hadoop.CachingPath;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
@@ -49,7 +50,6 @@
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
-import org.apache.hudi.hadoop.CachingPath;

import java.io.IOException;
import java.net.URI;
@@ -102,7 +102,7 @@ private static Registry getMetricRegistryForPath(Path p) {
}

protected static <R> R executeFuncWithTimeMetrics(String metricName, Path p, CheckedFunction<R> func) throws IOException {
-HoodieTimer timer = new HoodieTimer().startTimer();
+HoodieTimer timer = HoodieTimer.start();
R res = func.get();

Registry registry = getMetricRegistryForPath(p);
@@ -132,7 +132,7 @@ protected void refreshTimeline(HoodieTimeline visibleActiveTimeline) {
* Adds the provided statuses into the file system view, and also caches it inside this object.
*/
public List<HoodieFileGroup> addFilesToView(FileStatus[] statuses) {
-HoodieTimer timer = new HoodieTimer().startTimer();
+HoodieTimer timer = HoodieTimer.start();
List<HoodieFileGroup> fileGroups = buildFileGroups(statuses, visibleCommitsAndCompactionTimeline, true);
long fgBuildTimeTakenMs = timer.endTimer();
timer.startTimer();
@@ -216,8 +216,7 @@ protected List<HoodieFileGroup> buildFileGroups(Stream<HoodieBaseFile> baseFileS
* Get replaced instant for each file group by looking at all commit instants.
*/
private void resetFileGroupsReplaced(HoodieTimeline timeline) {
-HoodieTimer hoodieTimer = new HoodieTimer();
-hoodieTimer.startTimer();
+HoodieTimer hoodieTimer = HoodieTimer.start();
// for each REPLACE instant, get map of (partitionPath -> deleteFileGroup)
HoodieTimeline replacedTimeline = timeline.getCompletedReplaceTimeline();
Stream<Map.Entry<HoodieFileGroupId, HoodieInstant>> resultStream = replacedTimeline.getInstants().flatMap(instant -> {
@@ -334,8 +334,7 @@ public <K extends Serializable, T extends Serializable> T get(String columnFamil
*/
public <T extends Serializable> Stream<Pair<String, T>> prefixSearch(String columnFamilyName, String prefix) {
ValidationUtils.checkArgument(!closed);
-final HoodieTimer timer = new HoodieTimer();
-timer.startTimer();
+final HoodieTimer timer = HoodieTimer.start();
long timeTakenMicro = 0;
List<Pair<String, T>> results = new LinkedList<>();
try (final RocksIterator it = getRocksDB().newIterator(managedHandlesMap.get(columnFamilyName))) {