ORC-1492: Fix checkstyle violations for tests in `mapreduce/tools/bench` modules

### What changes were proposed in this pull request?

This PR aims to fix checkstyle violations for tests in the `mapreduce/tools/bench` modules and removes the following suppressions.

https://github.com/apache/orc/blob/e4b833809b6bd6eee316232e0e96d24bd7d4f6ee/java/checkstyle-suppressions.xml#L46-L52

Please note that we additionally suppress the `Indentation` rule on the following three files.

```xml
<suppress checks="Indentation" files="src/test/org/apache/orc/mapred/TestOrcFileEvolution.java"/>
<suppress checks="Indentation" files="src/test/org/apache/orc/tools/json/TestJsonSchemaFinder.java"/>
<suppress checks="Indentation" files="src/test/org/apache/orc/tools/convert/TestJsonReader.java"/>
```

### Why are the changes needed?

To apply Checkstyle to the test code.

### How was this patch tested?

Pass the CIs. Since we removed the global suppression rules on `test` code, the CI should verify the result.

Closes apache#1601 from dongjoon-hyun/ORC-1492.

Authored-by: Dongjoon Hyun <dongjoon@apache.org>
Signed-off-by: Dongjoon Hyun <dongjoon@apache.org>
dongjoon-hyun authored and cxzl25 committed Jan 11, 2024
1 parent 0bcde75 commit 70cb171
Showing 7 changed files with 25 additions and 33 deletions.
```diff
@@ -104,8 +104,8 @@ private Filter(String complexity, String filterType, boolean normalize)
         break;
       case "vector":
         Reader.Options options = new Reader.Options(conf)
-          .searchArgument(sArg, new String[0])
-          .allowSARGToFilter(true);
+            .searchArgument(sArg, new String[0])
+            .allowSARGToFilter(true);
         filter = FilterFactory.createBatchFilter(options,
                                                  FilterBenchUtil.schema,
                                                  false,
```
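Most of the re-indentation in this commit is driven by Checkstyle's `Indentation` check, which constrains both block indentation and the indentation of wrapped continuation lines such as the chained calls above. The sketch below shows a typical configuration of that check; it is illustrative only, and the exact offsets in ORC's `checkstyle.xml` may differ.

```xml
<!-- Illustrative sketch only; ORC's actual checkstyle.xml values may differ. -->
<module name="Indentation">
  <!-- Indentation of a block relative to its parent. -->
  <property name="basicOffset" value="2"/>
  <!-- Indentation of wrapped/continuation lines, as in chained method calls. -->
  <property name="lineWrappingIndentation" value="4"/>
</module>
```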
11 changes: 3 additions & 8 deletions java/checkstyle-suppressions.xml
```diff
@@ -38,16 +38,11 @@
   <suppress checks="Indentation" files="src/test/org/apache/orc/impl/TestSchemaEvolution.java"/>
   <suppress checks="Indentation" files="src/test/org/apache/orc/impl/TestStringRedBlackTree.java"/>
   <suppress checks="Indentation" files="src/test/org/apache/orc/impl/filter/*"/>
+  <suppress checks="Indentation" files="src/test/org/apache/orc/mapred/TestOrcFileEvolution.java"/>
+  <suppress checks="Indentation" files="src/test/org/apache/orc/tools/convert/TestJsonReader.java"/>
+  <suppress checks="Indentation" files="src/test/org/apache/orc/tools/json/TestJsonSchemaFinder.java"/>

   <suppress checks="UnusedImports" files="src/test/*"/>
   <suppress checks="AvoidStarImport" files="src/test/*"/>
   <suppress checks="CustomImportOrder" files="src/test/*"/>

-  <!-- Remove the following rule when the test code clean up is completed. -->
-  <suppress checks="Indentation" files="src/test/*"/>
-  <suppress checks="RedundantModifier" files="src/test/*"/>
-  <suppress checks="ModifierOrder" files="src/test/*"/>
-  <suppress checks="UpperEll" files="src/test/*"/>
-  <suppress checks="NeedBraces" files="src/test/*"/>
-  <suppress checks="RegexpSingleline" files="src/test/*"/>
 </suppressions>
```
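For reference, a suppressions file like this one is normally hooked into the main Checkstyle configuration through a `SuppressionFilter` module. A minimal sketch follows, assuming the suppressions file sits next to the main configuration; the property value in ORC's actual build may be resolved differently.

```xml
<!-- Inside the root <module name="Checker"> of the main Checkstyle config. -->
<module name="SuppressionFilter">
  <!-- Hypothetical path; ORC's build may resolve this differently. -->
  <property name="file" value="checkstyle-suppressions.xml"/>
</module>
```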
```diff
@@ -71,7 +71,7 @@ public void readWithSArg() throws IOException, InterruptedException {
                            new String[0]);
     FilterTestUtil.readStart();
     RecordReader<NullWritable, OrcStruct> r = new OrcInputFormat<OrcStruct>()
-      .getRecordReader(split, new JobConf(conf), null);
+        .getRecordReader(split, new JobConf(conf), null);
     long rowCount = validateFilteredRecordReader(r);
     double p = FilterTestUtil.readPercentage(FilterTestUtil.readEnd(),
                                              fs.getFileStatus(filePath).getLen());
@@ -93,7 +93,7 @@ public void readWithSArgAsFilter() throws IOException {
                            new String[0]);
     FilterTestUtil.readStart();
     RecordReader<NullWritable, OrcStruct> r = new OrcInputFormat<OrcStruct>()
-      .getRecordReader(split, new JobConf(conf), null);
+        .getRecordReader(split, new JobConf(conf), null);
     long rowCount = validateFilteredRecordReader(r);
     double p = FilterTestUtil.readPercentage(FilterTestUtil.readEnd(),
                                              fs.getFileStatus(filePath).getLen());
@@ -139,7 +139,7 @@ private void readSingleRowWfilter(long idx) throws IOException, InterruptedExcep
                            new String[0]);
     FilterTestUtil.readStart();
     RecordReader<NullWritable, OrcStruct> r = new OrcInputFormat<OrcStruct>()
-      .getRecordReader(split, new JobConf(conf), null);
+        .getRecordReader(split, new JobConf(conf), null);
     OrcStruct row = new OrcStruct(FilterTestUtil.schema);
     long rowCount = 0;
     while (r.next(NullWritable.get(), row)) {
```
```diff
@@ -41,13 +41,13 @@
 import static org.junit.jupiter.api.Assertions.assertEquals;

 public class FilterTestUtil {
-  private final static Logger LOG = LoggerFactory.getLogger(FilterTestUtil.class);
+  private static final Logger LOG = LoggerFactory.getLogger(FilterTestUtil.class);
   public static final TypeDescription schema = TypeDescription.createStruct()
-    .addField("f1", TypeDescription.createLong())
-    .addField("f2", TypeDescription.createDecimal().withPrecision(20).withScale(6))
-    .addField("f3", TypeDescription.createLong())
-    .addField("f4", TypeDescription.createString())
-    .addField("ridx", TypeDescription.createLong());
+      .addField("f1", TypeDescription.createLong())
+      .addField("f2", TypeDescription.createDecimal().withPrecision(20).withScale(6))
+      .addField("f3", TypeDescription.createLong())
+      .addField("f4", TypeDescription.createString())
+      .addField("ridx", TypeDescription.createLong());
   public static final long RowCount = 4000000L;
   private static final int scale = 3;
```
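The `LOG` change above is Checkstyle's `ModifierOrder` check at work: modifiers must appear in the order recommended by the Java Language Specification (e.g. visibility first, then `static`, then `final`). A minimal illustrative example, not taken from the ORC sources:

```java
// Illustrative only; not part of the ORC code base.
public class ModifierOrderExample {
  // private final static int BAD = 1;  // flagged: 'final' must not precede 'static'
  private static final int GOOD = 1;    // canonical JLS order: static before final
}
```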
```diff
@@ -77,8 +77,7 @@ public void readWithSArg() throws IOException, InterruptedException {
     TaskAttemptContext attemptContext = new TaskAttemptContextImpl(conf, id);
     FilterTestUtil.readStart();
     org.apache.hadoop.mapreduce.RecordReader<NullWritable, OrcStruct> r =
-        new OrcInputFormat<OrcStruct>().createRecordReader(split,
-                                                           attemptContext);
+        new OrcInputFormat<OrcStruct>().createRecordReader(split, attemptContext);
     long rowCount = validateFilteredRecordReader(r);
     double p = FilterTestUtil.readPercentage(FilterTestUtil.readEnd(),
                                              fs.getFileStatus(filePath).getLen());
@@ -102,8 +101,7 @@ public void readWithSArgAsFilter() throws IOException, InterruptedException {
     TaskAttemptContext attemptContext = new TaskAttemptContextImpl(conf, id);
     FilterTestUtil.readStart();
     org.apache.hadoop.mapreduce.RecordReader<NullWritable, OrcStruct> r =
-        new OrcInputFormat<OrcStruct>().createRecordReader(split,
-                                                           attemptContext);
+        new OrcInputFormat<OrcStruct>().createRecordReader(split, attemptContext);
     long rowCount = validateFilteredRecordReader(r);
     double p = FilterTestUtil.readPercentage(FilterTestUtil.readEnd(),
                                              fs.getFileStatus(filePath).getLen());
@@ -140,8 +138,7 @@ private void testSingleRowWfilter(long idx) throws IOException, InterruptedExcep
     TaskAttemptContext attemptContext = new TaskAttemptContextImpl(conf, id);
     FilterTestUtil.readStart();
     org.apache.hadoop.mapreduce.RecordReader<NullWritable, OrcStruct> r =
-        new OrcInputFormat<OrcStruct>().createRecordReader(split,
-                                                           attemptContext);
+        new OrcInputFormat<OrcStruct>().createRecordReader(split, attemptContext);
     long rowCount = 0;
     while (r.nextKeyValue()) {
       validateLimitedRow(r.getCurrentValue(), idx);
@@ -151,8 +148,8 @@ private void testSingleRowWfilter(long idx) throws IOException, InterruptedExcep
     assertEquals(1, rowCount);
   }

-  private static long validateFilteredRecordReader(org.apache.hadoop.mapreduce.RecordReader<NullWritable
-      , OrcStruct> rr)
+  private static long validateFilteredRecordReader(
+      org.apache.hadoop.mapreduce.RecordReader<NullWritable, OrcStruct> rr)
       throws IOException, InterruptedException {
     long rowCount = 0;
     while (rr.nextKeyValue()) {
```
6 changes: 3 additions & 3 deletions java/tools/src/test/org/apache/orc/impl/TestRLEv2.java
```diff
@@ -327,9 +327,9 @@ public void testBaseValueLimit() throws Exception {

     VectorizedRowBatch batch = schema.createRowBatch();
     //the minimum value is beyond RunLengthIntegerWriterV2.BASE_VALUE_LIMIT
-    long[] input = {-9007199254740992l,-8725724278030337l,-1125762467889153l, -1l,-9007199254740992l,
-      -9007199254740992l, -497l,127l,-1l,-72057594037927936l,-4194304l,-9007199254740992l,-4503599593816065l,
-      -4194304l,-8936830510563329l,-9007199254740992l, -1l, -70334384439312l,-4063233l, -6755399441973249l};
+    long[] input = {-9007199254740992L,-8725724278030337L,-1125762467889153L, -1L,-9007199254740992L,
+      -9007199254740992L, -497L,127L,-1L,-72057594037927936L,-4194304L,-9007199254740992L,-4503599593816065L,
+      -4194304L,-8936830510563329L,-9007199254740992L, -1L, -70334384439312L,-4063233L, -6755399441973249L};
     for(long data: input) {
       appendInt(batch, data);
     }
```
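The literal changes above come from Checkstyle's `UpperEll` check: `long` literals must use the uppercase `L` suffix, because a lowercase `l` is easily misread as the digit `1`. A minimal illustrative example, not taken from the ORC sources:

```java
// Illustrative only; not part of the ORC code base.
public class UpperEllExample {
  // long ambiguous = 10l;  // flagged: reads like the number 101
  long clear = 10L;         // unambiguous long literal
}
```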
6 changes: 3 additions & 3 deletions java/tools/src/test/org/apache/orc/tools/TestFileDump.java
```diff
@@ -285,9 +285,9 @@ public void testDump() throws Exception {
       writer.addRowBatch(batch);
     }
     writer.addUserMetadata("hive.acid.key.index",
-      StandardCharsets.UTF_8.encode("1,1,1;2,3,5;"));
+        StandardCharsets.UTF_8.encode("1,1,1;2,3,5;"));
     writer.addUserMetadata("some.user.property",
-      StandardCharsets.UTF_8.encode("foo#bar$baz&"));
+        StandardCharsets.UTF_8.encode("foo#bar$baz&"));
     writer.close();
     assertEquals(2079000, writer.getRawDataSize());
     assertEquals(21000, writer.getNumberOfRows());
@@ -334,7 +334,7 @@ public void testDataDump() throws Exception {
             format.parse("2014-11-25 00:00:00").getTime())),
         "string",
         "hello",
-          "hello",
+        "hello",
         m,
         Arrays.asList(100, 200),
         10, "foo");
```
