Skip to content

Commit d781113

Browse files
authored
HBASE-26204 Obtain credential for VerifyReplication with peerQuorumAddress (#3591)
Signed-off-by: Rushabh Shah <[email protected]>
Signed-off-by: Wellington Chevreuil <[email protected]>
1 parent 6ed03d9 commit d781113

File tree

3 files changed

+234
-9
lines changed

3 files changed

+234
-9
lines changed

hbase-mapreduce/pom.xml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -226,6 +226,11 @@
226226
<type>test-jar</type>
227227
<scope>test</scope>
228228
</dependency>
229+
<dependency>
230+
<groupId>org.apache.hadoop</groupId>
231+
<artifactId>hadoop-minikdc</artifactId>
232+
<scope>test</scope>
233+
</dependency>
229234
<dependency>
230235
<groupId>org.mockito</groupId>
231236
<artifactId>mockito-core</artifactId>

hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java

Lines changed: 63 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -270,7 +270,8 @@ private void logFailRowAndIncreaseCounter(Context context, Counters counter, Res
270270
if (!sourceResult.isEmpty()) {
271271
context.getCounter(Counters.GOODROWS).increment(1);
272272
if (verbose) {
273-
LOG.info("Good row key (with recompare): " + delimiter + Bytes.toStringBinary(row.getRow())
273+
LOG.info("Good row key (with recompare): " + delimiter +
274+
Bytes.toStringBinary(row.getRow())
274275
+ delimiter);
275276
}
276277
}
@@ -480,12 +481,16 @@ public Job createSubmittableJob(Configuration conf, String[] args)
480481
TableMapReduceUtil.initTableMapperJob(tableName, scan, Verifier.class, null, null, job);
481482
}
482483

484+
Configuration peerClusterConf;
483485
if (peerId != null) {
484486
assert peerConfigPair != null;
485-
Configuration peerClusterConf = peerConfigPair.getSecond();
486-
// Obtain the auth token from peer cluster
487-
TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf);
487+
peerClusterConf = peerConfigPair.getSecond();
488+
} else {
489+
peerClusterConf = HBaseConfiguration.createClusterConf(conf,
490+
peerQuorumAddress, PEER_CONFIG_PREFIX);
488491
}
492+
// Obtain the auth token from peer cluster
493+
TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf);
489494

490495
job.setOutputFormatClass(NullOutputFormat.class);
491496
job.setNumReduceTasks(0);
@@ -668,7 +673,8 @@ public boolean doCommandLine(final String[] args) {
668673
// This is to avoid making recompare calls to source/peer tables when snapshots are used
669674
if ((sourceSnapshotName != null || peerSnapshotName != null) && sleepMsBeforeReCompare > 0) {
670675
printUsage(
671-
"Using sleepMsBeforeReCompare along with snapshots is not allowed as snapshots are immutable");
676+
"Using sleepMsBeforeReCompare along with snapshots is not allowed as snapshots are"
677+
+ " immutable");
672678
return false;
673679
}
674680

@@ -708,8 +714,8 @@ private static void printUsage(final String errorMsg) {
708714
System.err.println(" without endtime means from starttime to forever");
709715
System.err.println(" endtime end of the time range");
710716
System.err.println(" versions number of cell versions to verify");
711-
System.err.println(" batch batch count for scan, " +
712-
"note that result row counts will no longer be actual number of rows when you use this option");
717+
System.err.println(" batch batch count for scan, note that"
718+
+ " result row counts will no longer be actual number of rows when you use this option");
713719
System.err.println(" raw includes raw scan if given in options");
714720
System.err.println(" families comma-separated list of families to copy");
715721
System.err.println(" row-prefixes comma-separated list of row key prefixes to filter on ");
@@ -726,16 +732,64 @@ private static void printUsage(final String errorMsg) {
726732
System.err.println(" peerHBaseRootAddress Peer cluster HBase root location");
727733
System.err.println();
728734
System.err.println("Args:");
729-
System.err.println(" peerid Id of the peer used for verification, must match the one given for replication");
735+
System.err.println(" peerid Id of the peer used for verification,"
736+
+ " must match the one given for replication");
730737
System.err.println(" peerQuorumAddress quorumAdress of the peer used for verification. The "
731738
+ "format is zk_quorum:zk_port:zk_hbase_path");
732739
System.err.println(" tablename Name of the table to verify");
733740
System.err.println();
734741
System.err.println("Examples:");
735-
System.err.println(" To verify the data replicated from TestTable for a 1 hour window with peer #5 ");
742+
System.err.println(
743+
" To verify the data replicated from TestTable for a 1 hour window with peer #5 ");
736744
System.err.println(" $ hbase " +
737745
"org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication" +
738746
" --starttime=1265875194289 --endtime=1265878794289 5 TestTable ");
747+
System.err.println();
748+
System.err.println(
749+
" To verify the data in TestTable between the cluster runs VerifyReplication and cluster-b");
750+
System.err.println(" Assume quorum address for cluster-b is"
751+
+ " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:2181:/cluster-b");
752+
System.err.println(
753+
" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" +
754+
" cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:"
755+
+ "2181:/cluster-b \\\n" +
756+
" TestTable");
757+
System.err.println();
758+
System.err.println(
759+
" To verify the data in TestTable between the secured cluster runs VerifyReplication"
760+
+ " and insecure cluster-b");
761+
System.err.println(
762+
" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" +
763+
" -D verifyrep.peer.hbase.security.authentication=simple \\\n" +
764+
" cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:"
765+
+ "2181:/cluster-b \\\n" +
766+
" TestTable");
767+
System.err.println();
768+
System.err.println(" To verify the data in TestTable between" +
769+
" the secured cluster runs VerifyReplication and secured cluster-b");
770+
System.err.println(" Assume cluster-b uses different kerberos principal, cluster-b/_HOST@E" +
771+
", for master and regionserver kerberos principal from another cluster");
772+
System.err.println(
773+
" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" +
774+
" -D verifyrep.peer.hbase.regionserver.kerberos.principal="
775+
+ "cluster-b/[email protected] \\\n" +
776+
" -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/[email protected] \\\n" +
777+
" cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:"
778+
+ "2181:/cluster-b \\\n" +
779+
" TestTable");
780+
System.err.println();
781+
System.err.println(
782+
" To verify the data in TestTable between the insecure cluster runs VerifyReplication"
783+
+ " and secured cluster-b");
784+
System.err.println(
785+
" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" +
786+
" -D verifyrep.peer.hbase.security.authentication=kerberos \\\n" +
787+
" -D verifyrep.peer.hbase.regionserver.kerberos.principal="
788+
+ "cluster-b/[email protected] \\\n" +
789+
" -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/[email protected] \\\n" +
790+
" cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:"
791+
+ "2181:/cluster-b \\\n" +
792+
" TestTable");
739793
}
740794

741795
@Override
hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java

Lines changed: 166 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,166 @@
1+
/**
2+
*
3+
* Licensed to the Apache Software Foundation (ASF) under one
4+
* or more contributor license agreements. See the NOTICE file
5+
* distributed with this work for additional information
6+
* regarding copyright ownership. The ASF licenses this file
7+
* to you under the Apache License, Version 2.0 (the
8+
* "License"); you may not use this file except in compliance
9+
* with the License. You may obtain a copy of the License at
10+
*
11+
* http://www.apache.org/licenses/LICENSE-2.0
12+
*
13+
* Unless required by applicable law or agreed to in writing, software
14+
* distributed under the License is distributed on an "AS IS" BASIS,
15+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16+
* See the License for the specific language governing permissions and
17+
* limitations under the License.
18+
*/
19+
package org.apache.hadoop.hbase.replication;
20+
21+
import static org.junit.Assert.assertEquals;
22+
import java.io.File;
23+
import java.io.IOException;
24+
import java.util.Arrays;
25+
import java.util.Collection;
26+
import java.util.function.Supplier;
27+
import org.apache.hadoop.conf.Configuration;
28+
import org.apache.hadoop.hbase.HBaseClassTestRule;
29+
import org.apache.hadoop.hbase.HBaseTestingUtil;
30+
import org.apache.hadoop.hbase.client.Admin;
31+
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
32+
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
33+
import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
34+
import org.apache.hadoop.hbase.security.access.AccessController;
35+
import org.apache.hadoop.hbase.security.access.SecureTestUtil;
36+
import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
37+
import org.apache.hadoop.hbase.security.token.TokenProvider;
38+
import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil;
39+
import org.apache.hadoop.hbase.testclassification.LargeTests;
40+
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
41+
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
42+
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
43+
import org.apache.hadoop.io.Text;
44+
import org.apache.hadoop.mapreduce.Job;
45+
import org.apache.hadoop.minikdc.MiniKdc;
46+
import org.apache.hadoop.security.Credentials;
47+
import org.apache.hadoop.security.UserGroupInformation;
48+
import org.apache.hadoop.security.token.Token;
49+
import org.apache.hadoop.security.token.TokenIdentifier;
50+
import org.junit.AfterClass;
51+
import org.junit.BeforeClass;
52+
import org.junit.ClassRule;
53+
import org.junit.Test;
54+
import org.junit.experimental.categories.Category;
55+
import org.junit.runner.RunWith;
56+
import org.junit.runners.Parameterized;
57+
import org.junit.runners.Parameterized.Parameter;
58+
import org.junit.runners.Parameterized.Parameters;
59+
60+
@Category({ ReplicationTests.class, LargeTests.class })
61+
@RunWith(Parameterized.class)
62+
public class TestVerifyReplicationSecureClusterCredentials {
63+
@ClassRule
64+
public static final HBaseClassTestRule CLASS_RULE =
65+
HBaseClassTestRule.forClass(TestVerifyReplicationSecureClusterCredentials.class);
66+
67+
private static MiniKdc KDC;
68+
private static final HBaseTestingUtil UTIL1 = new HBaseTestingUtil();
69+
private static final HBaseTestingUtil UTIL2 = new HBaseTestingUtil();
70+
71+
private static final File KEYTAB_FILE =
72+
new File(UTIL1.getDataTestDir("keytab").toUri().getPath());
73+
74+
private static final String LOCALHOST = "localhost";
75+
private static String CLUSTER_PRINCIPAL;
76+
private static String FULL_USER_PRINCIPAL;
77+
private static String HTTP_PRINCIPAL;
78+
79+
private static void setUpKdcServer() throws Exception {
80+
KDC = UTIL1.setupMiniKdc(KEYTAB_FILE);
81+
String username = UserGroupInformation.getLoginUser().getShortUserName();
82+
String userPrincipal = username + '/' + LOCALHOST;
83+
CLUSTER_PRINCIPAL = userPrincipal;
84+
FULL_USER_PRINCIPAL = userPrincipal + '@' + KDC.getRealm();
85+
HTTP_PRINCIPAL = "HTTP/" + LOCALHOST;
86+
KDC.createPrincipal(KEYTAB_FILE, CLUSTER_PRINCIPAL, HTTP_PRINCIPAL);
87+
}
88+
89+
private static void setupCluster(HBaseTestingUtil util) throws Exception {
90+
Configuration conf = util.getConfiguration();
91+
92+
SecureTestUtil.enableSecurity(conf);
93+
VisibilityTestUtil.enableVisiblityLabels(conf);
94+
SecureTestUtil.verifyConfiguration(conf);
95+
96+
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
97+
AccessController.class.getName() + ',' + TokenProvider.class.getName());
98+
99+
HBaseKerberosUtils.setSecuredConfiguration(conf,
100+
CLUSTER_PRINCIPAL + '@' + KDC.getRealm(), HTTP_PRINCIPAL + '@' + KDC.getRealm());
101+
102+
util.startMiniCluster();
103+
}
104+
105+
/**
106+
* Sets the security firstly for getting the correct default realm.
107+
*/
108+
@BeforeClass
109+
public static void beforeClass() throws Exception {
110+
setUpKdcServer();
111+
setupCluster(UTIL1);
112+
setupCluster(UTIL2);
113+
114+
try (Admin admin = UTIL1.getAdmin()) {
115+
admin.addReplicationPeer("1", ReplicationPeerConfig.newBuilder()
116+
.setClusterKey(ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration()))
117+
.putConfiguration(HBaseKerberosUtils.KRB_PRINCIPAL,
118+
UTIL2.getConfiguration().get(HBaseKerberosUtils.KRB_PRINCIPAL))
119+
.putConfiguration(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL,
120+
UTIL2.getConfiguration().get(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL))
121+
.build());
122+
}
123+
}
124+
125+
@AfterClass
126+
public static void cleanup() throws IOException {
127+
UTIL1.shutdownMiniCluster();
128+
UTIL2.shutdownMiniCluster();
129+
}
130+
131+
@Parameters
132+
public static Collection<Supplier<String>> peer() {
133+
return Arrays.asList(
134+
() -> "1",
135+
() -> ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())
136+
);
137+
}
138+
139+
@Parameter
140+
public Supplier<String> peer;
141+
142+
@Test
143+
@SuppressWarnings("unchecked")
144+
public void testJobCredentials() throws Exception {
145+
Job job = new VerifyReplication().createSubmittableJob(
146+
new Configuration(UTIL1.getConfiguration()),
147+
new String[] {
148+
peer.get(),
149+
"table"
150+
});
151+
152+
Credentials credentials = job.getCredentials();
153+
Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
154+
assertEquals(2, tokens.size());
155+
156+
String clusterId1 = ZKClusterId.readClusterIdZNode(UTIL1.getZooKeeperWatcher());
157+
Token<AuthenticationTokenIdentifier> tokenForCluster1 =
158+
(Token<AuthenticationTokenIdentifier>) credentials.getToken(new Text(clusterId1));
159+
assertEquals(FULL_USER_PRINCIPAL, tokenForCluster1.decodeIdentifier().getUsername());
160+
161+
String clusterId2 = ZKClusterId.readClusterIdZNode(UTIL2.getZooKeeperWatcher());
162+
Token<AuthenticationTokenIdentifier> tokenForCluster2 =
163+
(Token<AuthenticationTokenIdentifier>) credentials.getToken(new Text(clusterId2));
164+
assertEquals(FULL_USER_PRINCIPAL, tokenForCluster2.decodeIdentifier().getUsername());
165+
}
166+
}

0 commit comments

Comments (0)