
Commit e21088c

Author: fanshilun (committed)
HDFS-17719. Fix CheckStyle & Junit Test.
1 parent 2cb4855 commit e21088c

8 files changed: +68 / -65 lines changed

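Every hunk below follows the same pattern: these tests already call the JUnit 5 Assertions API, which takes the failure message as the last argument (JUnit 4 took it first), and the commit re-wraps those calls so the asserted expression stays on the first line and only the message moves to a continuation line, apparently to satisfy the Checkstyle line-length rule named in the commit title. A minimal sketch of the target style, with illustrative names that are not taken from this patch:

    import static org.junit.jupiter.api.Assertions.assertTrue;

    class WrapStyleExample {
      void check(boolean snapshotEnabled) {
        // JUnit 5 signature: assertTrue(condition, message) -- message last.
        // The condition stays on the first line; only the message wraps.
        assertTrue(snapshotEnabled,
            "Snapshot enabled bit is not set in FileStatus");
      }
    }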

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java

Lines changed: 43 additions & 46 deletions
@@ -251,7 +251,7 @@ private void testCreate() throws Exception {
     try {
       testCreate(path, false);
       fail("the create should have failed because the file exists " +
-               "and override is FALSE");
+          "and override is FALSE");
     } catch (IOException ex) {
       System.out.println("#");
     } catch (Exception ex) {
@@ -311,8 +311,8 @@ private void testTruncate() throws Exception {
 
   private void assertPathCapabilityForTruncate(Path file) throws Exception {
     FileSystem fs = this.getHttpFSFileSystem();
-    assertTrue(
-        fs.hasPathCapability(file, CommonPathCapabilities.FS_TRUNCATE), "HttpFS/WebHdfs/SWebHdfs support truncate");
+    assertTrue(fs.hasPathCapability(file, CommonPathCapabilities.FS_TRUNCATE),
+        "HttpFS/WebHdfs/SWebHdfs support truncate");
     fs.close();
   }
 
@@ -482,18 +482,18 @@ private void testFileStatusAttr() throws Exception {
     // Get the FileSystem instance that's being tested
     FileSystem fs = this.getHttpFSFileSystem();
     // Check FileStatus
-    assertFalse(
-        fs.getFileStatus(path).isSnapshotEnabled(), "Snapshot should be disallowed by default");
+    assertFalse(fs.getFileStatus(path).isSnapshotEnabled(),
+        "Snapshot should be disallowed by default");
     // Allow snapshot
     distributedFs.allowSnapshot(path);
     // Check FileStatus
-    assertTrue(
-        fs.getFileStatus(path).isSnapshotEnabled(), "Snapshot enabled bit is not set in FileStatus");
+    assertTrue(fs.getFileStatus(path).isSnapshotEnabled(),
+        "Snapshot enabled bit is not set in FileStatus");
     // Disallow snapshot
     distributedFs.disallowSnapshot(path);
     // Check FileStatus
-    assertFalse(
-        fs.getFileStatus(path).isSnapshotEnabled(), "Snapshot enabled bit is not cleared in FileStatus");
+    assertFalse(fs.getFileStatus(path).isSnapshotEnabled(),
+        "Snapshot enabled bit is not cleared in FileStatus");
     // Cleanup
     fs.delete(path, true);
     fs.close();
@@ -1174,9 +1174,9 @@ private void testStoragePolicy() throws Exception {
     Path path = new Path(getProxiedFSTestDir(), "policy.txt");
     FileSystem httpfs = getHttpFSFileSystem();
     // test getAllStoragePolicies
-    assertArrayEquals(
-        fs.getAllStoragePolicies().toArray(), httpfs.getAllStoragePolicies().toArray(),
-        "Policy array returned from the DFS and HttpFS should be equals");
+    assertArrayEquals(fs.getAllStoragePolicies().toArray(),
+        httpfs.getAllStoragePolicies().toArray(),
+        "Policy array returned from the DFS and HttpFS should be equals");
 
     // test get/set/unset policies
     DFSTestUtil.createFile(fs, path, 0, (short) 1, 0L);
@@ -1191,14 +1191,12 @@ private void testStoragePolicy() throws Exception {
     assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME.toString(),
         httpFsPolicy.getName(), "Storage policy returned from the get API should" +
         " be same as set policy");
-    assertEquals(
-        httpFsPolicy, dfsPolicy,
-        "Storage policy returned from the DFS and HttpFS should be equals");
+    assertEquals(httpFsPolicy, dfsPolicy,
+        "Storage policy returned from the DFS and HttpFS should be equals");
     // unset policy
     httpfs.unsetStoragePolicy(path);
-    assertEquals(
-        defaultdfsPolicy, httpfs.getStoragePolicy(path), "After unset storage policy, the get API shoudld"
-        + " return the default policy");
+    assertEquals(defaultdfsPolicy, httpfs.getStoragePolicy(path),
+        "After unset storage policy, the get API shoudld return the default policy");
     fs.close();
   }
 
@@ -1433,16 +1431,16 @@ private void testCreateSnapshot(String snapshotName) throws Exception {
     }
     Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot");
     FileStatus[] snapshotItems = fs.listStatus(snapshotsDir);
-    assertTrue(
-        snapshotItems.length == 1, "Should have exactly one snapshot.");
+    assertTrue(snapshotItems.length == 1,
+        "Should have exactly one snapshot.");
     String resultingSnapName = snapshotItems[0].getPath().getName();
     if (snapshotName == null) {
       assertTrue(
           Pattern.matches("(s)(\\d{8})(-)(\\d{6})(\\.)(\\d{3})",
           resultingSnapName), "Snapshot auto generated name not matching pattern");
     } else {
-      assertTrue(
-          snapshotName.equals(resultingSnapName), "Snapshot name is not same as passed name.");
+      assertTrue(snapshotName.equals(resultingSnapName),
+          "Snapshot name is not same as passed name.");
     }
     cleanSnapshotTests(snapshottablePath, resultingSnapName);
   }
@@ -1492,11 +1490,11 @@ private void testRenameSnapshot() throws Exception {
          "snap-new-name");
      Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot");
      FileStatus[] snapshotItems = fs.listStatus(snapshotsDir);
-     assertTrue(
-         snapshotItems.length == 1, "Should have exactly one snapshot.");
+     assertTrue(snapshotItems.length == 1,
+         "Should have exactly one snapshot.");
      String resultingSnapName = snapshotItems[0].getPath().getName();
-     assertTrue(
-         "snap-new-name".equals(resultingSnapName), "Snapshot name is not same as passed name.");
+     assertTrue("snap-new-name".equals(resultingSnapName),
+         "Snapshot name is not same as passed name.");
      cleanSnapshotTests(snapshottablePath, resultingSnapName);
    }
  }
@@ -1510,12 +1508,12 @@ private void testDeleteSnapshot() throws Exception {
      fs.createSnapshot(snapshottablePath, "snap-to-delete");
      Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot");
      FileStatus[] snapshotItems = fs.listStatus(snapshotsDir);
-     assertTrue(
-         snapshotItems.length == 1, "Should have exactly one snapshot.");
+     assertTrue(snapshotItems.length == 1,
+         "Should have exactly one snapshot.");
      fs.deleteSnapshot(snapshottablePath, "snap-to-delete");
      snapshotItems = fs.listStatus(snapshotsDir);
-     assertTrue(
-         snapshotItems.length == 0, "There should be no snapshot anymore.");
+     assertTrue(snapshotItems.length == 0,
+         "There should be no snapshot anymore.");
      fs.delete(snapshottablePath, true);
    }
  }
@@ -1528,8 +1526,8 @@ private void testAllowSnapshot() throws Exception {
     // Get the FileSystem instance that's being tested
     FileSystem fs = this.getHttpFSFileSystem();
     // Check FileStatus
-    assertFalse(
-        fs.getFileStatus(path).isSnapshotEnabled(), "Snapshot should be disallowed by default");
+    assertFalse(fs.getFileStatus(path).isSnapshotEnabled(),
+        "Snapshot should be disallowed by default");
     // Allow snapshot
     if (fs instanceof HttpFSFileSystem) {
       HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
@@ -1542,8 +1540,8 @@ private void testAllowSnapshot() throws Exception {
          " doesn't support allowSnapshot");
     }
     // Check FileStatus
-    assertTrue(
-        fs.getFileStatus(path).isSnapshotEnabled(), "allowSnapshot failed");
+    assertTrue(fs.getFileStatus(path).isSnapshotEnabled(),
+        "allowSnapshot failed");
     // Cleanup
     fs.delete(path, true);
   }
@@ -1557,8 +1555,8 @@ private void testDisallowSnapshot() throws Exception {
     // Get the FileSystem instance that's being tested
     FileSystem fs = this.getHttpFSFileSystem();
     // Check FileStatus
-    assertTrue(
-        fs.getFileStatus(path).isSnapshotEnabled(), "Snapshot should be allowed by DFS");
+    assertTrue(fs.getFileStatus(path).isSnapshotEnabled(),
+        "Snapshot should be allowed by DFS");
     // Disallow snapshot
     if (fs instanceof HttpFSFileSystem) {
       HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
@@ -1571,8 +1569,8 @@ private void testDisallowSnapshot() throws Exception {
          " doesn't support disallowSnapshot");
     }
     // Check FileStatus
-    assertFalse(
-        fs.getFileStatus(path).isSnapshotEnabled(), "disallowSnapshot failed");
+    assertFalse(fs.getFileStatus(path).isSnapshotEnabled(),
+        "disallowSnapshot failed");
     // Cleanup
     fs.delete(path, true);
   }
@@ -1586,8 +1584,8 @@ private void testDisallowSnapshotException() throws Exception {
     // Get the FileSystem instance that's being tested
     FileSystem fs = this.getHttpFSFileSystem();
     // Check FileStatus
-    assertTrue(
-        fs.getFileStatus(path).isSnapshotEnabled(), "Snapshot should be allowed by DFS");
+    assertTrue(fs.getFileStatus(path).isSnapshotEnabled(),
+        "Snapshot should be allowed by DFS");
     // Create some snapshots
     fs.createSnapshot(path, "snap-01");
     fs.createSnapshot(path, "snap-02");
@@ -1619,8 +1617,8 @@ private void testDisallowSnapshotException() throws Exception {
     }
     // Check FileStatus, should still be enabled since
     // disallow snapshot should fail
-    assertTrue(
-        fs.getFileStatus(path).isSnapshotEnabled(), "disallowSnapshot should not have succeeded");
+    assertTrue(fs.getFileStatus(path).isSnapshotEnabled(),
+        "disallowSnapshot should not have succeeded");
     // Cleanup
     fs.deleteSnapshot(path, "snap-02");
     fs.deleteSnapshot(path, "snap-01");
@@ -1707,8 +1705,8 @@ private void testGetSnapshotDiffIllegalParam() throws Exception {
     // Get the FileSystem instance that's being tested
     FileSystem fs = this.getHttpFSFileSystem();
     // Check FileStatus
-    assertTrue(
-        fs.getFileStatus(path).isSnapshotEnabled(), "Snapshot should be allowed by DFS");
+    assertTrue(fs.getFileStatus(path).isSnapshotEnabled(),
+        "Snapshot should be allowed by DFS");
     assertTrue(fs.getFileStatus(path).isSnapshotEnabled());
     // Get snapshot diff
     testGetSnapshotDiffIllegalParamCase(fs, path, "", "");
@@ -1870,8 +1868,7 @@ private void verifyGetServerDefaults(FileSystem fs, DistributedFileSystem dfs)
       WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
       sds = webHdfsFileSystem.getServerDefaults();
     } else {
-      fail(
-          fs.getClass().getSimpleName() + " doesn't support getServerDefaults");
+      fail(fs.getClass().getSimpleName() + " doesn't support getServerDefaults");
     }
     // Verify result with DFS
     FsServerDefaults dfssds = dfs.getServerDefaults();

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java

Lines changed: 8 additions & 9 deletions
@@ -49,7 +49,6 @@
 import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
 import org.apache.hadoop.util.JsonSerialization;
 import org.json.simple.JSONArray;
-import org.junit.jupiter.api.Assertions;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -1670,8 +1669,8 @@ public void testNoRedirect() throws Exception {
     assertTrue(location.contains(DataParam.NAME));
     assertFalse(location.contains(NoRedirectParam.NAME));
     assertTrue(location.contains("CREATE"));
-    assertTrue(
-        location.startsWith(TestJettyHelper.getJettyURL().toString()), "Wrong location: " + location);
+    assertTrue(location.startsWith(TestJettyHelper.getJettyURL().toString()),
+        "Wrong location: " + location);
 
     // Use the location to actually write the file
     url = new URL(location);
@@ -1708,8 +1707,8 @@ public void testNoRedirect() throws Exception {
     location = (String)json.get("Location");
     assertTrue(!location.contains(NoRedirectParam.NAME));
     assertTrue(location.contains("OPEN"));
-    assertTrue(
-        location.startsWith(TestJettyHelper.getJettyURL().toString()), "Wrong location: " + location);
+    assertTrue(location.startsWith(TestJettyHelper.getJettyURL().toString()),
+        "Wrong location: " + location);
 
     // Use the location to actually read
     url = new URL(location);
@@ -1736,8 +1735,8 @@ public void testNoRedirect() throws Exception {
     location = (String)json.get("Location");
     assertTrue(!location.contains(NoRedirectParam.NAME));
     assertTrue(location.contains("GETFILECHECKSUM"));
-    assertTrue(
-        location.startsWith(TestJettyHelper.getJettyURL().toString()), "Wrong location: " + location);
+    assertTrue(location.startsWith(TestJettyHelper.getJettyURL().toString()),
+        "Wrong location: " + location);
 
     // Use the location to actually get the checksum
     url = new URL(location);
@@ -1835,8 +1834,8 @@ public void testECPolicy() throws Exception {
     JSONObject jsonObject = (JSONObject) parser.parse(getFileStatusResponse);
     JSONObject details = (JSONObject) jsonObject.get("FileStatus");
     String ecpolicyForECfile = (String) details.get("ecPolicy");
-    assertEquals(
-        ecpolicyForECfile, ecPolicyName, "EC policy for ecFile should match the set EC policy");
+    assertEquals(ecpolicyForECfile, ecPolicyName,
+        "EC policy for ecFile should match the set EC policy");
 
     // Verify httpFs getFileStatus with WEBHDFS REST API
     WebHdfsFileSystem httpfsWebHdfs = (WebHdfsFileSystem) FileSystem.get(

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerWebServer.java

Lines changed: 2 additions & 1 deletion
@@ -187,7 +187,8 @@ private <T extends SignerSecretProvider> void assertSignerSecretProviderType(
        server.getWebAppContext().getServletContext()
            .getAttribute(SIGNER_SECRET_PROVIDER_ATTRIBUTE);
    assertNotNull(secretProvider, "The secret provider must not be null");
-   assertEquals(expected, secretProvider.getClass(), "The secret provider must match the following");
+   assertEquals(expected, secretProvider.getClass(),
+       "The secret provider must match the following");
  }
 
  private void assertServiceRespondsWithOK(URL serviceURL)

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java

Lines changed: 6 additions & 3 deletions
@@ -431,7 +431,8 @@ public void loadingSysPropConfig() throws Exception {
   @TestDir
   public void illegalState1() throws Exception {
     assertThrows(IllegalStateException.class, ()->{
-      Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
+      Server server = new Server("server",
+          TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
       server.destroy();
     });
   }
@@ -440,7 +441,8 @@ public void illegalState1() throws Exception {
   @TestDir
   public void illegalState2() throws Exception {
     assertThrows(IllegalStateException.class, () -> {
-      Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
+      Server server = new Server("server",
+          TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
       server.get(Object.class);
     });
   }
@@ -449,7 +451,8 @@ public void illegalState2() throws Exception {
   @TestDir
   public void illegalState3() throws Exception {
     assertThrows(IllegalStateException.class, () -> {
-      Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
+      Server server = new Server("server",
+          TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
       server.setService(null);
     });
   }

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java

Lines changed: 2 additions & 1 deletion
@@ -57,7 +57,8 @@ public void invalidGroupsMapping() throws Exception {
     assertThrows(RuntimeException.class, () -> {
       String dir = TestDirHelper.getTestDir().getAbsolutePath();
       Configuration conf = new Configuration(false);
-      conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName())));
+      conf.set("server.services", StringUtils.join(",",
+          Arrays.asList(GroupsService.class.getName())));
       conf.set("server.groups.hadoop.security.group.mapping", String.class.getName());
       Server server = new Server("server", dir, dir, dir, dir, conf);
       server.init();

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java

Lines changed: 4 additions & 2 deletions
@@ -49,10 +49,12 @@ public void handleTestExecutionException(ExtensionContext context,
        String regExp = testExceptionAnnotation.msgRegExp();
        Pattern pattern = Pattern.compile(regExp);
        if (!pattern.matcher(cause.getMessage()).find()) {
-         fail("Expected Exception Message pattern: " + regExp + " got message: " + ex.getMessage());
+         fail("Expected Exception Message pattern: " + regExp +
+             " got message: " + ex.getMessage());
        }
      } else {
-       fail("Expected Exception: " + klass.getSimpleName() + " got: " + ex.getClass().getSimpleName());
+       fail("Expected Exception: " + klass.getSimpleName() + " got: " +
+           ex.getClass().getSimpleName());
      }
    } else {
      throw ex;

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ private static class HdfsStatement {
     private String testName;
     private MiniDFSCluster miniHdfs = null;
 
-    public HdfsStatement(String testName) {
+    HdfsStatement(String testName) {
       this.testName = testName;
     }
 
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java

Lines changed: 2 additions & 2 deletions
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.test;
 
-import java.io.File;
 import java.lang.reflect.Method;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -160,7 +159,8 @@ public void afterEach(ExtensionContext context) throws Exception {
      try {
        server.stop();
      } catch (Exception ex) {
-       throw new RuntimeException("Could not stop embedded servlet container, " + ex.getMessage(), ex);
+       throw new RuntimeException("Could not stop embedded servlet container, " +
+           ex.getMessage(), ex);
      }
    }
  }
