From 18b1d9c2e560114cea8373ea2b44776710a68bd2 Mon Sep 17 00:00:00 2001 From: Steve Vaughan Jr Date: Mon, 25 Jul 2022 10:43:33 -0400 Subject: [PATCH 1/9] HDFS-16686. GetJournalEditServlet fails to authorize valid Kerberos request Use DfsServlet to obtain the UGI. --- .../server/GetJournalEditServlet.java | 26 +++++++------------ 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java index 81b3f8c1a1f1f..f726ff8f84de6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java @@ -27,11 +27,11 @@ import javax.servlet.ServletContext; import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.commons.text.StringEscapeUtils; +import org.apache.hadoop.hdfs.server.namenode.DfsServlet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -64,7 +64,7 @@ * */ @InterfaceAudience.Private -public class GetJournalEditServlet extends HttpServlet { +public class GetJournalEditServlet extends DfsServlet { private static final long serialVersionUID = -4635891628211723009L; private static final Logger LOG = @@ -77,17 +77,11 @@ public class GetJournalEditServlet extends HttpServlet { protected boolean isValidRequestor(HttpServletRequest request, Configuration conf) throws IOException { - String remotePrincipal = request.getUserPrincipal().getName(); - String remoteShortName = request.getRemoteUser(); - if (remotePrincipal == null) { // This really shouldn't happen... - LOG.warn("Received null remoteUser while authorizing access to " + - "GetJournalEditServlet"); - return false; - } + UserGroupInformation ugi = getUGI(request, conf); if (LOG.isDebugEnabled()) { - LOG.debug("Validating request made by " + remotePrincipal + - " / " + remoteShortName + ". This user is: " + + LOG.debug("Validating request made by " + ugi.getUserName() + + " / " + ugi.getShortUserName() + ". This user is: " + UserGroupInformation.getLoginUser()); } @@ -115,9 +109,9 @@ protected boolean isValidRequestor(HttpServletRequest request, Configuration con for (String v : validRequestors) { if (LOG.isDebugEnabled()) LOG.debug("isValidRequestor is comparing to valid requestor: " + v); - if (v != null && v.equals(remotePrincipal)) { + if (v != null && v.equals(ugi.getUserName())) { if (LOG.isDebugEnabled()) - LOG.debug("isValidRequestor is allowing: " + remotePrincipal); + LOG.debug("isValidRequestor is allowing: " + ugi.getUserName()); return true; } } @@ -125,16 +119,16 @@ protected boolean isValidRequestor(HttpServletRequest request, Configuration con // Additionally, we compare the short name of the requestor to this JN's // username, because we want to allow requests from other JNs during // recovery, but we can't enumerate the full list of JNs. 
- if (remoteShortName.equals( + if (ugi.getShortUserName().equals( UserGroupInformation.getLoginUser().getShortUserName())) { if (LOG.isDebugEnabled()) LOG.debug("isValidRequestor is allowing other JN principal: " + - remotePrincipal); + ugi.getUserName()); return true; } if (LOG.isDebugEnabled()) - LOG.debug("isValidRequestor is rejecting: " + remotePrincipal); + LOG.debug("isValidRequestor is rejecting: " + ugi.getUserName()); return false; } From 32431dea14d97beefc210c8e556d84d2903d119e Mon Sep 17 00:00:00 2001 From: Steve Vaughan Jr Date: Thu, 11 Aug 2022 15:12:33 -0400 Subject: [PATCH 2/9] Add tests for isValidRequestor --- .../server/TestGetJournalEditServlet.java | 94 +++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java new file mode 100644 index 0000000000000..9678d8f4ced9f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java @@ -0,0 +1,94 @@ +package org.apache.hadoop.hdfs.qjournal.server; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.web.resources.UserParam; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.log4j.Level; +import org.apache.log4j.LogManager; +import org.junit.BeforeClass; +import org.junit.Test; + +import javax.servlet.ServletConfig; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; +import java.io.IOException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TestGetJournalEditServlet { + + private final static Configuration conf = new HdfsConfiguration(); + + private final static GetJournalEditServlet servlet = new GetJournalEditServlet(); + + @BeforeClass + public static void setUp() throws ServletException { + LogManager.getLogger(GetJournalEditServlet.class).setLevel(Level.DEBUG); + + // Configure Hadoop + conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/"); + conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTH_TO_LOCAL, + "RULE:[2:$1/$2@$0]([nsdj]n/.*@REALM\\.TLD)s/.*/hdfs/\nDEFAULT"); + conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns"); + conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "nn/_HOST@REALM.TLD"); + + // Configure Kerberos UGI + UserGroupInformation.setConfiguration(conf); + UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser( + "jn/somehost@REALM.TLD")); + + // Initialize the servlet + ServletConfig config = mock(ServletConfig.class); + servlet.init(config); + } + + /** + * Unauthenticated user should be rejected. 
+ * + * @throws IOException for unexpected validation failures + */ + @Test + public void testWithoutUser() throws IOException { + // Test: Make a request without specifying a user + HttpServletRequest request = mock(HttpServletRequest.class); + boolean isValid = servlet.isValidRequestor(request, conf); + + // Verify: The request is invalid + assertThat(isValid).isFalse(); + } + + /** + * Namenode requests should be authorized, since it will match the configured namenode. + * + * @throws IOException for unexpected validation failures + */ + @Test + public void testRequestNameNode() throws IOException, ServletException { + // Test: Make a request from a namenode + HttpServletRequest request = mock(HttpServletRequest.class); + when(request.getParameter(UserParam.NAME)).thenReturn("nn/localhost@REALM.TLD"); + boolean isValid = servlet.isValidRequestor(request, conf); + + assertThat(isValid).isTrue(); + } + + /** + * There is a fallback using the short name, which is used by journalnodes. + * + * @throws IOException for unexpected validation failures + */ + @Test + public void testRequestShortName() throws IOException { + // Test: Make a request from a namenode + HttpServletRequest request = mock(HttpServletRequest.class); + when(request.getParameter(UserParam.NAME)).thenReturn("jn/localhost@REALM.TLD"); + boolean isValid = servlet.isValidRequestor(request, conf); + + assertThat(isValid).isTrue(); + } + +} \ No newline at end of file From 17b6fd0de3cb228b834f0f142d873e8f23fd1d22 Mon Sep 17 00:00:00 2001 From: Steve Vaughan Jr Date: Thu, 11 Aug 2022 21:33:34 -0400 Subject: [PATCH 3/9] Address Checkstyle and license issues Changed "conf" and "servlet" to uppercase since they are static (the servlet is initialized only once). --- .../server/TestGetJournalEditServlet.java | 38 +++++++++++++------ 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java index 9678d8f4ced9f..f46491d012fa8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java @@ -1,3 +1,19 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */
 package org.apache.hadoop.hdfs.qjournal.server;

 import org.apache.hadoop.conf.Configuration;
@@ -21,29 +37,29 @@

 public class TestGetJournalEditServlet {

-  private final static Configuration conf = new HdfsConfiguration();
+  private final static Configuration CONF = new HdfsConfiguration();

-  private final static GetJournalEditServlet servlet = new GetJournalEditServlet();
+  private final static GetJournalEditServlet SERVLET = new GetJournalEditServlet();

   @BeforeClass
   public static void setUp() throws ServletException {
     LogManager.getLogger(GetJournalEditServlet.class).setLevel(Level.DEBUG);

     // Configure Hadoop
-    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
-    conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTH_TO_LOCAL,
+    CONF.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
+    CONF.set(DFSConfigKeys.HADOOP_SECURITY_AUTH_TO_LOCAL,
         "RULE:[2:$1/$2@$0]([nsdj]n/.*@REALM\\.TLD)s/.*/hdfs/\nDEFAULT");
-    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns");
-    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "nn/_HOST@REALM.TLD");
+    CONF.set(DFSConfigKeys.DFS_NAMESERVICES, "ns");
+    CONF.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "nn/_HOST@REALM.TLD");

     // Configure Kerberos UGI
-    UserGroupInformation.setConfiguration(conf);
+    UserGroupInformation.setConfiguration(CONF);
     UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(
         "jn/somehost@REALM.TLD"));

     // Initialize the servlet
     ServletConfig config = mock(ServletConfig.class);
-    servlet.init(config);
+    SERVLET.init(config);
   }

   /**
@@ -55,7 +71,7 @@ public static void setUp() throws ServletException {
   public void testWithoutUser() throws IOException {
     // Test: Make a request without specifying a user
     HttpServletRequest request = mock(HttpServletRequest.class);
-    boolean isValid = servlet.isValidRequestor(request, conf);
+    boolean isValid = SERVLET.isValidRequestor(request, CONF);

     // Verify: The request is invalid
     assertThat(isValid).isFalse();
   }
@@ -71,7 +87,7 @@ public void testRequestNameNode() throws IOException, ServletException {
     // Test: Make a request from a namenode
     HttpServletRequest request = mock(HttpServletRequest.class);
     when(request.getParameter(UserParam.NAME)).thenReturn("nn/localhost@REALM.TLD");
-    boolean isValid = servlet.isValidRequestor(request, conf);
+    boolean isValid = SERVLET.isValidRequestor(request, CONF);

     assertThat(isValid).isTrue();
   }
@@ -86,7 +102,7 @@ public void testRequestShortName() throws IOException {
     // Test: Make a request from a namenode
     HttpServletRequest request = mock(HttpServletRequest.class);
     when(request.getParameter(UserParam.NAME)).thenReturn("jn/localhost@REALM.TLD");
-    boolean isValid = servlet.isValidRequestor(request, conf);
+    boolean isValid = SERVLET.isValidRequestor(request, CONF);

     assertThat(isValid).isTrue();
   }

From f777e41f63a5bf29b82a60db61d70f181756d7d0 Mon Sep 17 00:00:00 2001
From: Steve Vaughan Jr
Date: Tue, 16 Aug 2022 20:10:20 -0400
Subject: [PATCH 4/9] Use try-with-resources to ensure close/shutdown

Update the tests to ensure that the mini clusters are shut down properly.
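For context on the pattern applied throughout this patch: MiniDFSCluster implements AutoCloseable (which is what lets the try-with-resources form below compile), so shutdown is guaranteed on every exit path, including assertion failures. A minimal before/after sketch, with the test body elided and the names taken from the hunks below:

    // Before: shutdown runs only if the finally block is reached with a
    // non-null reference.
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      cluster.waitActive();
      // ... test body ...
    } finally {
      if (cluster != null) cluster.shutdown();
    }

    // After: close() is invoked automatically on all paths out of the
    // block, even when an assertion throws.
    try (MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
      cluster.waitActive();
      // ... test body ...
    }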
--- .../hadoop/hdfs/TestRollingUpgrade.java | 218 +++++++++--------- 1 file changed, 106 insertions(+), 112 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java index c428d20e840a8..ead9b861e612a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java @@ -87,9 +87,7 @@ public static void runCmd(DFSAdmin dfsadmin, boolean success, public void testDFSAdminRollingUpgradeCommands() throws Exception { // start a cluster final Configuration conf = new HdfsConfiguration(); - MiniDFSCluster cluster = null; - try { - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) { cluster.waitActive(); final Path foo = new Path("/foo"); @@ -149,8 +147,6 @@ public void testDFSAdminRollingUpgradeCommands() throws Exception { Assert.assertTrue(dfs.exists(bar)); Assert.assertTrue(dfs.exists(baz)); } - } finally { - if(cluster != null) cluster.shutdown(); } } @@ -173,114 +169,115 @@ public void testRollingUpgradeWithQJM() throws Exception { LOG.info("nn2Dir=" + nn2Dir); final Configuration conf = new HdfsConfiguration(); - final MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build(); - mjc.waitActive(); - setConf(conf, nn1Dir, mjc); - - { - // Start the cluster once to generate the dfs dirs - final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(0) - .manageNameDfsDirs(false) - .checkExitOnShutdown(false) - .build(); - // Shutdown the cluster before making a copy of the namenode dir to release - // all file locks, otherwise, the copy will fail on some platforms. - cluster.shutdown(); - } - - MiniDFSCluster cluster2 = null; - try { - // Start a second NN pointed to the same quorum. - // We need to copy the image dir from the first NN -- or else - // the new NN will just be rejected because of Namespace mismatch. 
- FileUtil.fullyDelete(nn2Dir); - FileUtil.copy(nn1Dir, FileSystem.getLocal(conf).getRaw(), - new Path(nn2Dir.getAbsolutePath()), false, conf); - - // Start the cluster again - final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(0) - .format(false) - .manageNameDfsDirs(false) - .checkExitOnShutdown(false) - .build(); + try (MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build()) { + mjc.waitActive(); + setConf(conf, nn1Dir, mjc); - final Path foo = new Path("/foo"); - final Path bar = new Path("/bar"); - final Path baz = new Path("/baz"); - - final RollingUpgradeInfo info1; { - final DistributedFileSystem dfs = cluster.getFileSystem(); - dfs.mkdirs(foo); - - //start rolling upgrade - dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); - info1 = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE); - dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); - LOG.info("START\n" + info1); - - //query rolling upgrade - assertEquals(info1, dfs.rollingUpgrade(RollingUpgradeAction.QUERY)); - - dfs.mkdirs(bar); + // Start the cluster once to generate the dfs dirs + final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) + .manageNameDfsDirs(false) + .checkExitOnShutdown(false) + .build(); + // Shutdown the cluster before making a copy of the namenode dir to release + // all file locks, otherwise, the copy will fail on some platforms. cluster.shutdown(); } - // cluster2 takes over QJM - final Configuration conf2 = setConf(new Configuration(), nn2Dir, mjc); - cluster2 = new MiniDFSCluster.Builder(conf2) - .numDataNodes(0) - .format(false) - .manageNameDfsDirs(false) - .build(); - final DistributedFileSystem dfs2 = cluster2.getFileSystem(); - - // Check that cluster2 sees the edits made on cluster1 - Assert.assertTrue(dfs2.exists(foo)); - Assert.assertTrue(dfs2.exists(bar)); - Assert.assertFalse(dfs2.exists(baz)); - - //query rolling upgrade in cluster2 - assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY)); - - dfs2.mkdirs(baz); - - LOG.info("RESTART cluster 2"); - cluster2.restartNameNode(); - assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY)); - Assert.assertTrue(dfs2.exists(foo)); - Assert.assertTrue(dfs2.exists(bar)); - Assert.assertTrue(dfs2.exists(baz)); - - //restart cluster with -upgrade should fail. + MiniDFSCluster cluster2 = null; try { - cluster2.restartNameNode("-upgrade"); - } catch(IOException e) { - LOG.info("The exception is expected.", e); - } + // Start a second NN pointed to the same quorum. + // We need to copy the image dir from the first NN -- or else + // the new NN will just be rejected because of Namespace mismatch. 
+ FileUtil.fullyDelete(nn2Dir); + FileUtil.copy(nn1Dir, FileSystem.getLocal(conf).getRaw(), + new Path(nn2Dir.getAbsolutePath()), false, conf); + + // Start the cluster again + final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) + .format(false) + .manageNameDfsDirs(false) + .checkExitOnShutdown(false) + .build(); + + final Path foo = new Path("/foo"); + final Path bar = new Path("/bar"); + final Path baz = new Path("/baz"); + + final RollingUpgradeInfo info1; + { + final DistributedFileSystem dfs = cluster.getFileSystem(); + dfs.mkdirs(foo); + + //start rolling upgrade + dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + info1 = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE); + dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); + LOG.info("START\n" + info1); + + //query rolling upgrade + assertEquals(info1, dfs.rollingUpgrade(RollingUpgradeAction.QUERY)); + + dfs.mkdirs(bar); + cluster.shutdown(); + } - LOG.info("RESTART cluster 2 again"); - cluster2.restartNameNode(); - assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY)); - Assert.assertTrue(dfs2.exists(foo)); - Assert.assertTrue(dfs2.exists(bar)); - Assert.assertTrue(dfs2.exists(baz)); - - //finalize rolling upgrade - final RollingUpgradeInfo finalize = dfs2.rollingUpgrade( - RollingUpgradeAction.FINALIZE); - Assert.assertTrue(finalize.isFinalized()); - - LOG.info("RESTART cluster 2 with regular startup option"); - cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR); - cluster2.restartNameNode(); - Assert.assertTrue(dfs2.exists(foo)); - Assert.assertTrue(dfs2.exists(bar)); - Assert.assertTrue(dfs2.exists(baz)); - } finally { - if (cluster2 != null) cluster2.shutdown(); + // cluster2 takes over QJM + final Configuration conf2 = setConf(new Configuration(), nn2Dir, mjc); + cluster2 = new MiniDFSCluster.Builder(conf2) + .numDataNodes(0) + .format(false) + .manageNameDfsDirs(false) + .build(); + final DistributedFileSystem dfs2 = cluster2.getFileSystem(); + + // Check that cluster2 sees the edits made on cluster1 + Assert.assertTrue(dfs2.exists(foo)); + Assert.assertTrue(dfs2.exists(bar)); + Assert.assertFalse(dfs2.exists(baz)); + + //query rolling upgrade in cluster2 + assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY)); + + dfs2.mkdirs(baz); + + LOG.info("RESTART cluster 2"); + cluster2.restartNameNode(); + assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY)); + Assert.assertTrue(dfs2.exists(foo)); + Assert.assertTrue(dfs2.exists(bar)); + Assert.assertTrue(dfs2.exists(baz)); + + //restart cluster with -upgrade should fail. 
+      try {
+        cluster2.restartNameNode("-upgrade");
+      } catch (IOException e) {
+        LOG.info("The exception is expected.", e);
+      }
+
+      LOG.info("RESTART cluster 2 again");
+      cluster2.restartNameNode();
+      assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
+      Assert.assertTrue(dfs2.exists(foo));
+      Assert.assertTrue(dfs2.exists(bar));
+      Assert.assertTrue(dfs2.exists(baz));
+
+      //finalize rolling upgrade
+      final RollingUpgradeInfo finalize = dfs2.rollingUpgrade(
+          RollingUpgradeAction.FINALIZE);
+      Assert.assertTrue(finalize.isFinalized());
+
+      LOG.info("RESTART cluster 2 with regular startup option");
+      cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
+      cluster2.restartNameNode();
+      Assert.assertTrue(dfs2.exists(foo));
+      Assert.assertTrue(dfs2.exists(bar));
+      Assert.assertTrue(dfs2.exists(baz));
+    } finally {
+      if (cluster2 != null) cluster2.shutdown();
+    }
    }
  }

@@ -408,9 +405,7 @@ private static void rollbackRollingUpgrade(Path foo, Path bar,
  public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
    // start a cluster
    final Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()){
      cluster.waitActive();
      final DFSAdmin dfsadmin = new DFSAdmin(conf);
      DataNode dn = cluster.getDataNodes().get(0);
@@ -431,8 +426,6 @@ public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {

      // ping should fail.
      assertEquals(-1, dfsadmin.run(args1));
-    } finally {
-      if (cluster != null) cluster.shutdown();
    }
  }

@@ -691,7 +684,7 @@ public void testCheckpoint(int nnCount) throws IOException, InterruptedException
  /**
   * Verify that the namenode at the given index has an FSImage with a TxId up to txid-1
   */
-  private void verifyNNCheckpoint(MiniDFSCluster dfsCluster, long txid, int nnIndex) throws InterruptedException {
+  private void verifyNNCheckpoint(MiniDFSCluster dfsCluster, long txid, int nnIndex)
+      throws InterruptedException {
    int retries = 0;
    while (++retries < 5) {
      NNStorage storage = dfsCluster.getNamesystem(nnIndex).getFSImage()

From a228eed57e5cb53c881907ed9d81075c22deecb2 Mon Sep 17 00:00:00 2001
From: Steve Vaughan Jr
Date: Wed, 17 Aug 2022 12:21:29 -0400
Subject: [PATCH 5/9] Make MiniQJMHACluster AutoCloseable like MiniDFSCluster

This update allows MiniQJMHACluster to be used in try-with-resources,
ensuring that tests are cleaned-up correctly.
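The change reduces to implementing AutoCloseable and delegating to the existing shutdown logic. A condensed sketch of the resulting shape (builder, fields, and logging elided; the hunks below have the actual code):

    public class MiniQJMHACluster implements AutoCloseable {
      // ... builder, fields, and accessors ...

      public void shutdown() {
        // stop the DFS cluster, then the journal cluster
      }

      @Override
      public void close() {
        shutdown();
      }
    }

which lets tests write:

    try (MiniQJMHACluster cluster =
        new MiniQJMHACluster.Builder(conf).setNumNameNodes(2).build()) {
      MiniDFSCluster dfsCluster = cluster.getDfsCluster();
      dfsCluster.waitActive();
      // ... test body ...
    }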
--- .../apache/hadoop/hdfs/TestRollingUpgrade.java | 17 +++-------------- .../hadoop/hdfs/qjournal/MiniQJMHACluster.java | 16 +++++++++++++--- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java index ead9b861e612a..5efba0ef78741 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java @@ -522,9 +522,7 @@ public void testQueryWithMultipleNN() throws Exception { private void testQuery(int nnCount) throws Exception{ final Configuration conf = new Configuration(); - MiniQJMHACluster cluster = null; - try { - cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build(); + try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build()) { MiniDFSCluster dfsCluster = cluster.getDfsCluster(); dfsCluster.waitActive(); @@ -554,10 +552,6 @@ private void testQuery(int nnCount) throws Exception{ // The NN should have a copy of the fsimage in case of rollbacks. Assert.assertTrue(dfsCluster.getNamesystem(0).getFSImage() .hasRollbackFSImage()); - } finally { - if (cluster != null) { - cluster.shutdown(); - } } } @@ -648,11 +642,10 @@ public void testCheckpoint(int nnCount) throws IOException, InterruptedException conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1); - MiniQJMHACluster cluster = null; final Path foo = new Path("/foo"); - try { - cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build(); + try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount) + .build()) { MiniDFSCluster dfsCluster = cluster.getDfsCluster(); dfsCluster.waitActive(); @@ -674,10 +667,6 @@ public void testCheckpoint(int nnCount) throws IOException, InterruptedException verifyNNCheckpoint(dfsCluster, txid, i); } - } finally { - if (cluster != null) { - cluster.shutdown(); - } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java index 3ece3d7e47a68..a6a5dbd5f68a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java @@ -35,7 +35,7 @@ import java.util.List; import java.util.Random; -public class MiniQJMHACluster { +public class MiniQJMHACluster implements AutoCloseable { private MiniDFSCluster cluster; private MiniJournalCluster journalCluster; private final Configuration conf; @@ -185,8 +185,18 @@ public MiniJournalCluster getJournalCluster() { return journalCluster; } - public void shutdown() throws IOException { + public void shutdown() { cluster.shutdown(); - journalCluster.shutdown(); + try { + journalCluster.shutdown(); + } catch (IOException shutdownFailure) { + LOG.warn("Exception while closing journal cluster", shutdownFailure); + } + } + + @Override + public void close() { + shutdown(); } + } From e2b11331685e5e13c840ba7b60b9627510c1bdd1 Mon Sep 17 00:00:00 2001 From: Steve Vaughan Jr Date: Wed, 17 Aug 2022 12:22:58 -0400 Subject: [PATCH 6/9] Use try-with-resources for mini 
clusters This update ensures that tests are cleaned-up correctly. --- .../org/apache/hadoop/hdfs/TestRollingUpgrade.java | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java index 5efba0ef78741..0f9ca7f17becf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java @@ -307,9 +307,7 @@ private static void checkMxBean() throws Exception { public void testRollback() throws Exception { // start a cluster final Configuration conf = new HdfsConfiguration(); - MiniDFSCluster cluster = null; - try { - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()) { cluster.waitActive(); final Path foo = new Path("/foo"); @@ -349,8 +347,6 @@ public void testRollback() throws Exception { startRollingUpgrade(foo, bar, file, data, cluster); rollbackRollingUpgrade(foo, bar, file, data, cluster); - } finally { - if(cluster != null) cluster.shutdown(); } } @@ -558,9 +554,7 @@ private void testQuery(int nnCount) throws Exception{ @Test (timeout = 300000) public void testQueryAfterRestart() throws IOException, InterruptedException { final Configuration conf = new Configuration(); - MiniDFSCluster cluster = null; - try { - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) { cluster.waitActive(); DistributedFileSystem dfs = cluster.getFileSystem(); @@ -574,10 +568,6 @@ public void testQueryAfterRestart() throws IOException, InterruptedException { cluster.restartNameNodes(); dfs.rollingUpgrade(RollingUpgradeAction.QUERY); - } finally { - if (cluster != null) { - cluster.shutdown(); - } } } From 1c8f55262fd86ddd7e4fa79e49d9e488efbdd35d Mon Sep 17 00:00:00 2001 From: Steve Vaughan Jr Date: Wed, 17 Aug 2022 12:23:57 -0400 Subject: [PATCH 7/9] Use test-specific data directories Create a default HDFS configuration which has test-specific data directories. This is intended to protect against interactions between test runs that might corrupt results. Each test run's data is automatically cleaned-up by JUnit. 
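In JUnit 4, a @Rule TemporaryFolder is created before each test method and deleted afterwards, so every test sees its own dfs/name, dfs/namesecondary, and dfs/data directories. A sketch of a consuming test, assuming the getHdfsConfiguration() helper added below (the test name and body are illustrative):

    @Test
    public void testExample() throws Exception {
      // Fresh per-test locations for the name, checkpoint, and data dirs.
      final Configuration conf = getHdfsConfiguration();
      try (MiniDFSCluster cluster =
          new MiniDFSCluster.Builder(conf).numDataNodes(1).build()) {
        cluster.waitActive();
        // ... assertions ...
      } // cluster is shut down here; JUnit then deletes the folders
    }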
--- .../hadoop/hdfs/TestRollingUpgrade.java | 46 +++++++++++++++---- 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java index 0f9ca7f17becf..9e907df7f2c9e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java @@ -33,6 +33,8 @@ import javax.management.ReflectionException; import javax.management.openmbean.CompositeDataSupport; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -80,13 +82,37 @@ public static void runCmd(DFSAdmin dfsadmin, boolean success, } } + @Rule + public TemporaryFolder folder= new TemporaryFolder(); + + /** + * Create a default HDFS configuration which has test-specific data directories. This is + * intended to protect against interactions between test runs that might corrupt results. Each + * test run's data is automatically cleaned-up by JUnit. + * + * @return a default configuration with test-specific data directories + */ + public Configuration getHdfsConfiguration() throws IOException { + Configuration conf = new HdfsConfiguration(); + + // Override the file system locations with test-specific temporary folders + conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, + folder.newFolder("dfs/name").toString()); + conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, + folder.newFolder("dfs/namesecondary").toString()); + conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, + folder.newFolder("dfs/data").toString()); + + return conf; + } + /** * Test DFSAdmin Upgrade Command. 
*/ @Test public void testDFSAdminRollingUpgradeCommands() throws Exception { // start a cluster - final Configuration conf = new HdfsConfiguration(); + final Configuration conf = getHdfsConfiguration(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) { cluster.waitActive(); @@ -168,7 +194,7 @@ public void testRollingUpgradeWithQJM() throws Exception { LOG.info("nn1Dir=" + nn1Dir); LOG.info("nn2Dir=" + nn2Dir); - final Configuration conf = new HdfsConfiguration(); + final Configuration conf = getHdfsConfiguration(); try (MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build()) { mjc.waitActive(); setConf(conf, nn1Dir, mjc); @@ -306,7 +332,7 @@ private static void checkMxBean() throws Exception { @Test public void testRollback() throws Exception { // start a cluster - final Configuration conf = new HdfsConfiguration(); + final Configuration conf = getHdfsConfiguration(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()) { cluster.waitActive(); @@ -400,7 +426,7 @@ private static void rollbackRollingUpgrade(Path foo, Path bar, @Test public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception { // start a cluster - final Configuration conf = new HdfsConfiguration(); + final Configuration conf = getHdfsConfiguration(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()){ cluster.waitActive(); final DFSAdmin dfsadmin = new DFSAdmin(conf); @@ -451,7 +477,7 @@ private void testFinalize(int nnCount) throws Exception { private void testFinalize(int nnCount, boolean skipImageDeltaCheck) throws Exception { - final Configuration conf = new HdfsConfiguration(); + final Configuration conf = getHdfsConfiguration(); MiniQJMHACluster cluster = null; final Path foo = new Path("/foo"); final Path bar = new Path("/bar"); @@ -517,7 +543,7 @@ public void testQueryWithMultipleNN() throws Exception { } private void testQuery(int nnCount) throws Exception{ - final Configuration conf = new Configuration(); + final Configuration conf = getHdfsConfiguration(); try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build()) { MiniDFSCluster dfsCluster = cluster.getDfsCluster(); dfsCluster.waitActive(); @@ -553,7 +579,7 @@ private void testQuery(int nnCount) throws Exception{ @Test (timeout = 300000) public void testQueryAfterRestart() throws IOException, InterruptedException { - final Configuration conf = new Configuration(); + final Configuration conf = getHdfsConfiguration(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) { cluster.waitActive(); DistributedFileSystem dfs = cluster.getFileSystem(); @@ -583,7 +609,7 @@ public void testCheckpointWithMultipleNN() throws IOException, InterruptedExcept @Test(timeout = 60000) public void testRollBackImage() throws Exception { - final Configuration conf = new Configuration(); + final Configuration conf = getHdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 10); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 2); @@ -628,7 +654,7 @@ public void duringUploadInProgess() } public void testCheckpoint(int nnCount) throws IOException, InterruptedException { - final Configuration conf = new Configuration(); + final Configuration conf = getHdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); 
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
@@ -705,7 +731,7 @@ public void testCheckpointWithSNN() throws Exception {
    SecondaryNameNode snn = null;

    try {
-      Configuration conf = new HdfsConfiguration();
+      Configuration conf = getHdfsConfiguration();
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();

From 3fee70695c9ddea3a896b1f5401abd3fbaeeb5c8 Mon Sep 17 00:00:00 2001
From: Steve Vaughan Jr
Date: Mon, 22 Aug 2022 10:03:03 -0400
Subject: [PATCH 8/9] Fix whitespace and remove debug logger

---
 .../test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java | 4 ++--
 .../hdfs/qjournal/server/TestGetJournalEditServlet.java      | 4 ----
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index 9e907df7f2c9e..6e7014c42eb13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -83,7 +83,7 @@ public static void runCmd(DFSAdmin dfsadmin, boolean success,
  }

  @Rule
-  public TemporaryFolder folder= new TemporaryFolder();
+  public TemporaryFolder folder = new TemporaryFolder();

  /**
   * Create a default HDFS configuration which has test-specific data directories. This is
@@ -427,7 +427,7 @@ private static void rollbackRollingUpgrade(Path foo, Path bar,
  public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
    // start a cluster
    final Configuration conf = getHdfsConfiguration();
-    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()){
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()) {
      cluster.waitActive();
      final DFSAdmin dfsadmin = new DFSAdmin(conf);
      DataNode dn = cluster.getDataNodes().get(0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java
index f46491d012fa8..7d9dea0351651 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java
@@ -21,8 +21,6 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -43,8 +41,6 @@ public class TestGetJournalEditServlet {

  @BeforeClass
  public static void setUp() throws ServletException {
-    LogManager.getLogger(GetJournalEditServlet.class).setLevel(Level.DEBUG);
-
    // Configure Hadoop
    CONF.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
    CONF.set(DFSConfigKeys.HADOOP_SECURITY_AUTH_TO_LOCAL,

From a0e2cac08deed9268a3269b9c2d2e4b3c7a0bd90 Mon Sep 17 00:00:00 2001
From: Steve Vaughan Jr
Date: Mon, 22 Aug 2022 17:13:35 -0400
Subject: [PATCH 9/9] Restore shutdown IOException

Keeping close() free of a throws clause means try-with-resources callers
don't have to handle a checked exception, while shutdown() gets back its
original IOException signature. Move the try-catch from shutdown() into
close().
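The resulting split, seen from the call sites (both callers are illustrative):

    // Direct callers still see the checked exception and can handle it:
    cluster.shutdown();   // declared to throw IOException

    // try-with-resources callers are not forced to catch anything, because
    // the close() override catches the IOException and logs a warning:
    try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build()) {
      // ... test body ...
    } // close() runs here and absorbs any shutdown IOException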
--- .../hadoop/hdfs/qjournal/MiniQJMHACluster.java | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java index a6a5dbd5f68a6..0791e0ace1c0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java @@ -185,18 +185,19 @@ public MiniJournalCluster getJournalCluster() { return journalCluster; } - public void shutdown() { + public void shutdown() throws IOException { cluster.shutdown(); + journalCluster.shutdown(); + } + + @Override + public void close() { try { - journalCluster.shutdown(); + shutdown(); } catch (IOException shutdownFailure) { LOG.warn("Exception while closing journal cluster", shutdownFailure); } - } - @Override - public void close() { - shutdown(); } }