
Commit 0007e2a

Merge branch 'apache:trunk' into YARN-7953
2 parents 70df622 + 773dd7c

File tree

27 files changed: +225 -44 lines changed


LICENSE-binary

Lines changed: 1 addition & 1 deletion
@@ -337,7 +337,7 @@ org.apache.kerby:kerby-xdr:2.0.3
 org.apache.kerby:token-provider:2.0.3
 org.apache.solr:solr-solrj:8.11.2
 org.apache.yetus:audience-annotations:0.5.0
-org.apache.zookeeper:zookeeper:3.7.2
+org.apache.zookeeper:zookeeper:3.8.3
 org.codehaus.jettison:jettison:1.5.4
 org.eclipse.jetty:jetty-annotations:9.4.53.v20231009
 org.eclipse.jetty:jetty-http:9.4.53.v20231009

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java

Lines changed: 11 additions & 5 deletions
@@ -25,6 +25,7 @@
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry;
 import org.apache.hadoop.classification.VisibleForTesting;
 
 import org.apache.commons.lang3.StringUtils;
@@ -365,12 +366,17 @@ public void execute() throws IOException, NoSuchAlgorithmException {
       } else {
         password = c.readPassword("Enter alias password: ");
       }
-      char[] storePassword =
-          provider.getCredentialEntry(alias).getCredential();
-      String beMatch =
-          Arrays.equals(storePassword, password) ? "success" : "failed";
+      CredentialEntry credentialEntry = provider.getCredentialEntry(alias);
+      if(credentialEntry == null) {
+        // Fail the password match when alias not found
+        getOut().println("Password match failed for " + alias + ".");
+      } else {
+        char[] storePassword = credentialEntry.getCredential();
+        String beMatch =
+            Arrays.equals(storePassword, password) ? "success" : "failed";
 
-      getOut().println("Password match " + beMatch + " for " + alias + ".");
+        getOut().println("Password match " + beMatch + " for " + alias + ".");
+      }
     } catch (IOException e) {
       getOut().println("Cannot check aliases for CredentialProvider: " +
           provider.toString()
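
This fixes a NullPointerException in the credential shell's check subcommand: getCredentialEntry(alias) returns null when the alias does not exist, so the old provider.getCredentialEntry(alias).getCredential() chain crashed instead of reporting a failed match. A minimal sketch of driving the patched command programmatically, mirroring the new test below (the provider URI and alias here are placeholders, not values from the commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.alias.CredentialShell;
import org.apache.hadoop.util.ToolRunner;

public class CheckMissingAlias {
  public static void main(String[] args) throws Exception {
    // "check" prompts on the console for the alias password, then compares
    // it against the stored credential. The JCEKS path is a placeholder.
    int rc = ToolRunner.run(new Configuration(), new CredentialShell(),
        new String[] {"check", "missingAlias",
            "-provider", "jceks://file/tmp/test.jceks"});
    // With this patch, an unknown alias prints
    // "Password match failed for missingAlias." instead of throwing an NPE.
    System.out.println("exit code: " + rc);
  }
}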

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java

Lines changed: 15 additions & 0 deletions
@@ -165,6 +165,21 @@ public void testPromptForCredentialWithEmptyPasswd() throws Exception {
     assertTrue(outContent.toString().contains("Passwords don't match"));
   }
 
+  @Test
+  public void testPromptForCredentialNotFound() throws Exception {
+    String[] args1 = {"check", "credential1", "-provider",
+        jceksProvider};
+    ArrayList<String> password = new ArrayList<String>();
+    password.add("p@ssw0rd");
+    int rc = 0;
+    CredentialShell shell = new CredentialShell();
+    shell.setConf(new Configuration());
+    shell.setPasswordReader(new MockPasswordReader(password));
+    rc = shell.run(args1);
+    assertEquals(0, rc);
+    assertOutputContains("Password match failed for credential1.");
+  }
+
   @Test
   public void testPromptForCredential() throws Exception {
     String[] args1 = {"create", "credential1", "-provider",

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java

Lines changed: 13 additions & 12 deletions
@@ -787,7 +787,7 @@ public void run() {
             scope = null;
             dataQueue.removeFirst();
             ackQueue.addLast(one);
-            packetSendTime.put(one.getSeqno(), Time.monotonicNow());
+            packetSendTime.put(one.getSeqno(), Time.monotonicNowNanos());
             dataQueue.notifyAll();
           }
         }
@@ -953,7 +953,7 @@ void waitForAckedSeqno(long seqno) throws IOException {
       dnodes = nodes != null ? nodes.length : 3;
     }
     int writeTimeout = dfsClient.getDatanodeWriteTimeout(dnodes);
-    long begin = Time.monotonicNow();
+    long begin = Time.monotonicNowNanos();
     try {
       synchronized (dataQueue) {
         while (!streamerClosed) {
@@ -963,14 +963,14 @@ void waitForAckedSeqno(long seqno) throws IOException {
           }
           try {
            dataQueue.wait(1000); // when we receive an ack, we notify on
-            long duration = Time.monotonicNow() - begin;
-            if (duration > writeTimeout) {
+            long duration = Time.monotonicNowNanos() - begin;
+            if (TimeUnit.NANOSECONDS.toMillis(duration) > writeTimeout) {
              LOG.error("No ack received, took {}ms (threshold={}ms). "
                  + "File being written: {}, block: {}, "
                  + "Write pipeline datanodes: {}.",
-                  duration, writeTimeout, src, block, nodes);
+                  TimeUnit.NANOSECONDS.toMillis(duration), writeTimeout, src, block, nodes);
              throw new InterruptedIOException("No ack received after " +
-                  duration / 1000 + "s and a timeout of " +
+                  TimeUnit.NANOSECONDS.toSeconds(duration) + "s and a timeout of " +
                  writeTimeout / 1000 + "s");
            }
            // dataQueue
@@ -984,11 +984,12 @@ void waitForAckedSeqno(long seqno) throws IOException {
        } catch (ClosedChannelException cce) {
          LOG.debug("Closed channel exception", cce);
        }
-        long duration = Time.monotonicNow() - begin;
-        if (duration > dfsclientSlowLogThresholdMs) {
+        long duration = Time.monotonicNowNanos() - begin;
+        if (TimeUnit.NANOSECONDS.toMillis(duration) > dfsclientSlowLogThresholdMs) {
          LOG.warn("Slow waitForAckedSeqno took {}ms (threshold={}ms). File being"
              + " written: {}, block: {}, Write pipeline datanodes: {}.",
-              duration, dfsclientSlowLogThresholdMs, src, block, nodes);
+              TimeUnit.NANOSECONDS.toMillis(duration), dfsclientSlowLogThresholdMs,
+              src, block, nodes);
        }
      }
    }
@@ -1179,10 +1180,10 @@ public void run() {
          if (ack.getSeqno() != DFSPacket.HEART_BEAT_SEQNO) {
            Long begin = packetSendTime.get(ack.getSeqno());
            if (begin != null) {
-              long duration = Time.monotonicNow() - begin;
-              if (duration > dfsclientSlowLogThresholdMs) {
+              long duration = Time.monotonicNowNanos() - begin;
+              if (TimeUnit.NANOSECONDS.toMillis(duration) > dfsclientSlowLogThresholdMs) {
                LOG.info("Slow ReadProcessor read fields for block " + block
-                    + " took " + duration + "ms (threshold="
+                    + " took " + TimeUnit.NANOSECONDS.toMillis(duration) + "ms (threshold="
                    + dfsclientSlowLogThresholdMs + "ms); ack: " + ack
                    + ", targets: " + Arrays.asList(targets));
              }
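
This change moves the streamer's ack timers from millisecond to nanosecond resolution: timestamps now come from Time.monotonicNowNanos(), and the raw nanosecond durations are converted with TimeUnit only at the comparison and logging sites, which still work in milliseconds and seconds. A minimal, self-contained sketch of that pattern, using System.nanoTime() as a stand-in for Hadoop's monotonic clock and an illustrative threshold:

import java.util.concurrent.TimeUnit;

public class NanoTimerSketch {
  public static void main(String[] args) throws InterruptedException {
    final long thresholdMs = 5;     // illustrative slow-operation threshold
    long begin = System.nanoTime(); // stand-in for Time.monotonicNowNanos()
    Thread.sleep(10);               // stand-in for waiting on the dataQueue
    long durationNanos = System.nanoTime() - begin;
    // Keep the raw measurement in nanos; convert only when comparing
    // against or logging millisecond-based thresholds.
    if (TimeUnit.NANOSECONDS.toMillis(durationNanos) > thresholdMs) {
      System.out.printf("slow operation took %d ms (threshold=%d ms)%n",
          TimeUnit.NANOSECONDS.toMillis(durationNanos), thresholdMs);
    }
  }
}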

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java

Lines changed: 1 addition & 1 deletion
@@ -75,7 +75,7 @@ public Quota(Router router, RouterRpcServer server) {
    * @param storagespaceQuota Storage space quota.
    * @param type StorageType that the space quota is intended to be set on.
    * @param checkMountEntry whether to check the path is a mount entry.
-   * @throws AccessControlException If the quota system is disabled or if
+   * @throws IOException If the quota system is disabled or if
    *           checkMountEntry is true and the path is a mount entry.
    */
   public void setQuota(String path, long namespaceQuota, long storagespaceQuota,

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java

Lines changed: 4 additions & 0 deletions
@@ -279,6 +279,10 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic {
       FEDERATION_ROUTER_PREFIX + "safemode.expiration";
   public static final long DFS_ROUTER_SAFEMODE_EXPIRATION_DEFAULT =
       3 * DFS_ROUTER_CACHE_TIME_TO_LIVE_MS_DEFAULT;
+  public static final String DFS_ROUTER_SAFEMODE_CHECKPERIOD_MS =
+      FEDERATION_ROUTER_PREFIX + "safemode.checkperiod";
+  public static final long DFS_ROUTER_SAFEMODE_CHECKPERIOD_MS_DEFAULT =
+      TimeUnit.SECONDS.toMillis(5);
 
   // HDFS Router-based federation mount table entries
   /** Maximum number of cache entries to have. */

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java

Lines changed: 2 additions & 2 deletions
@@ -133,8 +133,8 @@ protected void serviceInit(Configuration conf) throws Exception {
 
     // Use same interval as cache update service
     this.setIntervalMs(conf.getTimeDuration(
-        RBFConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE_MS,
-        RBFConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE_MS_DEFAULT,
+        RBFConfigKeys.DFS_ROUTER_SAFEMODE_CHECKPERIOD_MS,
+        RBFConfigKeys.DFS_ROUTER_SAFEMODE_CHECKPERIOD_MS_DEFAULT,
         TimeUnit.MILLISECONDS));
 
     this.startupInterval = conf.getTimeDuration(
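
Taken together, the two RBF changes give the router safemode service its own check interval instead of reusing the state-store cache TTL: a new safemode.checkperiod key with a 5-second default. A minimal sketch of how the key resolves, assuming FEDERATION_ROUTER_PREFIX expands it to dfs.federation.router.safemode.checkperiod:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;

public class SafemodeCheckPeriod {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // getTimeDuration accepts unit-suffixed values such as "3s";
    // unsuffixed values are read in the unit passed below.
    conf.set(RBFConfigKeys.DFS_ROUTER_SAFEMODE_CHECKPERIOD_MS, "3s");
    long intervalMs = conf.getTimeDuration(
        RBFConfigKeys.DFS_ROUTER_SAFEMODE_CHECKPERIOD_MS,
        RBFConfigKeys.DFS_ROUTER_SAFEMODE_CHECKPERIOD_MS_DEFAULT,
        TimeUnit.MILLISECONDS);
    System.out.println("safemode check interval: " + intervalMs + " ms"); // 3000
  }
}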

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java

Lines changed: 1 addition & 3 deletions
@@ -138,7 +138,7 @@ public StateStoreService() {
    * Initialize the State Store and the connection to the back-end.
    *
    * @param config Configuration for the State Store.
-   * @throws IOException Cannot create driver for the State Store.
+   * @throws Exception Cannot create driver for the State Store.
    */
   @Override
   protected void serviceInit(Configuration config) throws Exception {
@@ -239,7 +239,6 @@ protected void serviceStop() throws Exception {
    *
    * @param <T> Type of the records stored.
    * @param clazz Class of the record store to track.
-   * @return New record store.
    * @throws ReflectiveOperationException
    */
   private <T extends RecordStore<?>> void addRecordStore(
@@ -428,7 +427,6 @@ public void refreshCaches(boolean force) {
         result = cachedStore.loadCache(force);
       } catch (IOException e) {
         LOG.error("Error updating cache for {}", cacheName, e);
-        result = false;
       }
       if (!result) {
         success = false;

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java

Lines changed: 2 additions & 2 deletions
@@ -70,7 +70,7 @@ public MountTableStoreImpl(StateStoreDriver driver) {
    *
    * @param src mount entry being accessed
    * @param action type of action being performed on the mount entry
-   * @throws AccessControlException if mount table cannot be accessed
+   * @throws IOException if mount table cannot be accessed
    */
   private void checkMountTableEntryPermission(String src, FsAction action)
       throws IOException {
@@ -90,7 +90,7 @@ private void checkMountTableEntryPermission(String src, FsAction action)
    * Check parent path permission recursively. It needs WRITE permission
    * of the nearest parent entry and other EXECUTE permission.
    * @param src mount entry being checked
-   * @throws AccessControlException if mount table cannot be accessed
+   * @throws IOException if mount table cannot be accessed
    */
   private void checkMountTablePermission(final String src) throws IOException {
     String parent = src.substring(0, src.lastIndexOf(Path.SEPARATOR));

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java

Lines changed: 1 addition & 1 deletion
@@ -54,7 +54,7 @@ public FederationProtocolPBTranslator(Class<P> protoType) {
    * the proto handler this translator holds.
    */
   @SuppressWarnings("unchecked")
-  public void setProto(Message p) {
+  public void setProto(Message p) throws IllegalArgumentException {
     if (protoClass.isInstance(p)) {
       if (this.builder != null) {
         // Merge with builder
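
IllegalArgumentException is unchecked, so the added throws clause documents the contract rather than changing behavior: it tells callers that setProto rejects messages that are not instances of the expected proto class. A minimal, self-contained sketch of that type-checked-setter pattern; the class and field names below are illustrative, not the actual translator internals:

import com.google.protobuf.Message;

/** Sketch of a setter that rejects protos of the wrong type. */
class TypedProtoHolder<P extends Message> {
  private final Class<P> protoClass;
  private P proto;

  TypedProtoHolder(Class<P> protoClass) {
    this.protoClass = protoClass;
  }

  /** Unchecked exception, so the throws clause is documentation only. */
  void setProto(Message p) throws IllegalArgumentException {
    if (protoClass.isInstance(p)) {
      proto = protoClass.cast(p); // safe: checked just above
    } else {
      throw new IllegalArgumentException("Expected " + protoClass.getName()
          + " but got " + p.getClass().getName());
    }
  }
}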
