
Commit 163a765

HADOOP-19415. Fix CheckStyle Issue.
1 parent d066cb6 commit 163a765

File tree

9 files changed, 45 insertions(+), 16 deletions(-)


hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/impl/TestVectoredReadUtils.java

Lines changed: 1 addition & 1 deletion
@@ -688,7 +688,7 @@ private static FileRange retrieve(List<FileRange> input, String key) {
   }
 
   /**
-   * Mock run a vectored read and validate the results with the
+   * Mock run a vectored read and validate the results with the
    * <ol>
    * <li> {@code ByteBufferPositionedReadable.readFully()} is invoked once per range.</li>
    * <li> The buffers are filled with data</li>

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java

Lines changed: 31 additions & 3 deletions
@@ -23,6 +23,8 @@
 import org.junit.jupiter.api.Timeout;
 import org.junit.jupiter.api.extension.RegisterExtension;
 
+import java.util.concurrent.TimeUnit;
+
 import static org.apache.hadoop.test.HadoopTestBase.TEST_DEFAULT_TIMEOUT_VALUE;
 
 /**
@@ -32,7 +34,7 @@
  * Threads are named to the method being executed, for ease of diagnostics
  * in logs and thread dumps.
  */
-@Timeout(TEST_DEFAULT_TIMEOUT_VALUE)
+@Timeout(value = TEST_DEFAULT_TIMEOUT_VALUE, unit = TimeUnit.MILLISECONDS)
 public abstract class HadoopTestBase extends Assertions {
 
   /**
@@ -46,8 +48,34 @@ public abstract class HadoopTestBase extends Assertions {
    * {@link #PROPERTY_TEST_DEFAULT_TIMEOUT}
    * is not set: {@value}.
    */
-  public static final int TEST_DEFAULT_TIMEOUT_VALUE = 100;
-
+  public static final int TEST_DEFAULT_TIMEOUT_VALUE = 100000;
+
+  /**
+   * The JUnit rule that sets the default timeout for tests.
+   */
+  public int defaultTimeout = retrieveTestTimeout();
+
+  /**
+   * Retrieve the test timeout from the system property
+   * {@link #PROPERTY_TEST_DEFAULT_TIMEOUT}, falling back to
+   * the value in {@link #TEST_DEFAULT_TIMEOUT_VALUE} if the
+   * property is not defined.
+   * @return the recommended timeout for tests
+   */
+  protected int retrieveTestTimeout() {
+    String propval = System.getProperty(PROPERTY_TEST_DEFAULT_TIMEOUT,
+        Integer.toString(
+            TEST_DEFAULT_TIMEOUT_VALUE));
+    int millis;
+    try {
+      millis = Integer.parseInt(propval);
+    } catch (NumberFormatException e) {
+      //fall back to the default value, as the property cannot be parsed
+      millis = TEST_DEFAULT_TIMEOUT_VALUE;
+    }
+    return millis;
+  }
+
   /**
    * The method name.
   */
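
For context, here is a minimal, self-contained sketch of the timeout-resolution pattern this hunk adds: read a system property and fall back to a compiled-in default when the property is missing or unparseable. The sketch is not part of the commit, and the property name and class name below are placeholders chosen for illustration, not the constants defined in HadoopTestBase.

// Standalone sketch of the property-with-fallback pattern (placeholder names).
public final class TestTimeoutSketch {

  static final String PROPERTY_TEST_DEFAULT_TIMEOUT = "test.default.timeout";
  static final int TEST_DEFAULT_TIMEOUT_VALUE = 100000;

  static int retrieveTestTimeout() {
    // Use the default when the property is unset.
    String propval = System.getProperty(PROPERTY_TEST_DEFAULT_TIMEOUT,
        Integer.toString(TEST_DEFAULT_TIMEOUT_VALUE));
    try {
      return Integer.parseInt(propval);
    } catch (NumberFormatException e) {
      // Unparseable value: fall back to the compiled-in default.
      return TEST_DEFAULT_TIMEOUT_VALUE;
    }
  }

  public static void main(String[] args) {
    // e.g. java -Dtest.default.timeout=5000 TestTimeoutSketch
    System.out.println("resolved test timeout (ms): " + retrieveTestTimeout());
  }
}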

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLimitInputStream.java

Lines changed: 2 additions & 2 deletions
@@ -52,8 +52,8 @@ public void testRead() throws IOException {
   public void testResetWithoutMark() throws IOException {
     assertThrows(IOException.class, () -> {
       try (LimitInputStream limitInputStream =
-          new LimitInputStream(new RandomInputStream(), 128)) {
-        limitInputStream.reset();
+          new LimitInputStream(new RandomInputStream(), 128)) {
+        limitInputStream.reset();
       }
     });
   }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java

Lines changed: 1 addition & 1 deletion
@@ -73,7 +73,7 @@ public void setup() throws Exception {
     // rpc, it would return client port, in this case, it will be the
     // auxiliary port for data node. Which is not what auxiliary is for.
     // setting service rpc port to avoid this.
-    clusterConf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "localhost:9020");
+    clusterConf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "localhost:9021");
     clusterConf.set(
         CommonConfigurationKeys.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
         "org.apache.hadoop.security.IngressPortBasedResolver");

hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestDataBlocks.java

Lines changed: 2 additions & 2 deletions
@@ -109,8 +109,8 @@ public void testBlockFactoryIO(String pBufferType) throws Throwable {
     assertEquals(limit - bufferLen, block.remainingCapacity(),
         "capacity in " + block);
     assertTrue(block.hasCapacity(64), "hasCapacity(64) in " + block);
-    assertTrue(
-        block.hasCapacity(limit - bufferLen), "No capacity in " + block);
+    assertTrue(block.hasCapacity(limit - bufferLen),
+        "No capacity in " + block);
 
     // now start the write
     S3ADataBlocks.BlockUploadData blockUploadData = block.startUpload();

hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestInvoker.java

Lines changed: 3 additions & 3 deletions
@@ -486,9 +486,9 @@ public void testRetryOnThrottle() throws Throwable {
   public void testNoRetryOfBadRequestNonIdempotent() throws Throwable {
     assertThrows(AWSBadRequestException.class, () -> {
       invoker.retry("test", null, false,
-          () -> {
+          () -> {
             throw serviceException(400, "bad request");
-          });
+          });
     });
   }
 
@@ -514,7 +514,7 @@ public void testRetryAWSConnectivity() throws Throwable {
   public void testRetryBadRequestNotIdempotent() throws Throwable {
     assertThrows(AWSBadRequestException.class, () -> {
       invoker.retry("test", null, false,
-          () -> {
+          () -> {
             throw BAD_REQUEST;
           });
     });

hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedFileListing.java

Lines changed: 1 addition & 1 deletion
@@ -163,7 +163,7 @@ public void testPartitionsResolution() throws Throwable {
     Path rootFile = new Path(attemptPath, "root.txt");
     touch(attemptFS, rootFile);
     assertThat(listPartitions(attemptFS, attemptPath)).
-        containsAnyOf(oct2017, StagingCommitterConstants.TABLE_ROOT);
+        containsAnyOf(oct2017, StagingCommitterConstants.TABLE_ROOT);
   }
 
   /**

hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestS3AMultipartUploaderSupport.java

Lines changed: 2 additions & 2 deletions
@@ -65,8 +65,8 @@ public void testRoundTrip2() throws Throwable {
     assertEquals(1, result.getPartNumber());
     assertEquals("11223344", result.getEtag());
     assertEquals(len, result.getLen());
-    assertThat(result.getChecksumAlgorithm())
-        .describedAs("Checksum algorithm must not be present").isNull();
+    assertThat(result.getChecksumAlgorithm())
+        .describedAs("Checksum algorithm must not be present").isNull();
     assertThat(result.getChecksum())
         .describedAs("Checksum must not be generated").isNull();
   }

hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAzureBlobFileSystemBasics.java

Lines changed: 2 additions & 1 deletion
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs.azurebfs.contract;
 
 import java.io.IOException;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.FileStatus;
@@ -36,7 +37,7 @@
 /**
  * Basic Contract test for Azure BlobFileSystem.
  */
-@Timeout(TEST_TIMEOUT)
+@Timeout(value = TEST_TIMEOUT, unit = TimeUnit.MILLISECONDS)
 public class ITestAzureBlobFileSystemBasics extends FileSystemContractBaseTest {
   private final ABFSContractTestBinding binding;
 
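
The same @Timeout adjustment appears here as in HadoopTestBase above. As an illustration that is not part of the commit, the sketch below shows how JUnit 5 interprets the annotation: without an explicit unit the value is read as seconds, a class-level annotation covers every test method, and a method-level annotation overrides it. Class, method, and value names are invented for the example.

import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

// Class-level timeout: 30 000 ms, applied to each test method in the class.
@Timeout(value = 30_000, unit = TimeUnit.MILLISECONDS)
class TimeoutUnitSketch {

  @Test
  void usesClassLevelTimeout() {
    // inherits the 30 000 ms class-level timeout
  }

  @Test
  @Timeout(5)  // method-level override: 5 seconds, since the default unit is seconds
  void usesMethodLevelTimeout() {
  }
}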
