Skip to content

Commit 9c4e9ac

Browse files
authored
Merge branch 'trunk' into YARN-11433-V2
2 parents 636c01d + bf605c8 commit 9c4e9ac

File tree

109 files changed

+2828
-201
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

109 files changed

+2828
-201
lines changed

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,7 @@ public static RpcMetrics create(Server server, Configuration conf) {
141141
MutableCounterLong rpcAuthorizationSuccesses;
142142
@Metric("Number of client backoff requests")
143143
MutableCounterLong rpcClientBackoff;
144-
@Metric("Number of Slow RPC calls")
144+
@Metric("Number of slow RPC calls")
145145
MutableCounterLong rpcSlowCalls;
146146
@Metric("Number of requeue calls")
147147
MutableCounterLong rpcRequeueCalls;

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -549,7 +549,17 @@ public HadoopZookeeperFactory(String zkPrincipal, String kerberosPrincipal,
549549
public ZooKeeper newZooKeeper(String connectString, int sessionTimeout,
550550
Watcher watcher, boolean canBeReadOnly
551551
) throws Exception {
552-
ZKClientConfig zkClientConfig = new ZKClientConfig();
552+
return this.newZooKeeper(connectString, sessionTimeout,
553+
watcher, canBeReadOnly, new ZKClientConfig());
554+
}
555+
556+
@Override
557+
public ZooKeeper newZooKeeper(String connectString, int sessionTimeout,
558+
Watcher watcher, boolean canBeReadOnly, ZKClientConfig zkClientConfig
559+
) throws Exception {
560+
if (zkClientConfig == null) {
561+
zkClientConfig = new ZKClientConfig();
562+
}
553563
if (zkPrincipal != null) {
554564
LOG.info("Configuring zookeeper to use {} as the server principal",
555565
zkPrincipal);

hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,16 +74,21 @@ The default timeunit used for RPC metrics is milliseconds (as per the below desc
7474
| `SentBytes` | Total number of sent bytes |
7575
| `RpcQueueTimeNumOps` | Total number of RPC calls |
7676
| `RpcQueueTimeAvgTime` | Average queue time in milliseconds |
77-
| `RpcLockWaitTimeNumOps` | Total number of RPC call (same as RpcQueueTimeNumOps) |
77+
| `RpcLockWaitTimeNumOps` | Total number of RPC calls (same as RpcQueueTimeNumOps) |
7878
| `RpcLockWaitTimeAvgTime` | Average time waiting for lock acquisition in milliseconds |
7979
| `RpcProcessingTimeNumOps` | Total number of RPC calls (same as RpcQueueTimeNumOps) |
8080
| `RpcProcessingAvgTime` | Average Processing time in milliseconds |
81+
| `DeferredRpcProcessingTimeNumOps` | Total number of Deferred RPC calls |
82+
| `DeferredRpcProcessingAvgTime` | Average Deferred Processing time in milliseconds |
83+
| `RpcResponseTimeNumOps` | Total number of RPC calls (same as RpcQueueTimeNumOps) |
84+
| `RpcResponseAvgTime` | Average Response time in milliseconds |
8185
| `RpcAuthenticationFailures` | Total number of authentication failures |
8286
| `RpcAuthenticationSuccesses` | Total number of authentication successes |
8387
| `RpcAuthorizationFailures` | Total number of authorization failures |
8488
| `RpcAuthorizationSuccesses` | Total number of authorization successes |
8589
| `RpcClientBackoff` | Total number of client backoff requests |
8690
| `RpcSlowCalls` | Total number of slow RPC calls |
91+
| `RpcRequeueCalls` | Total number of requeue RPC calls |
8792
| `RpcCallsSuccesses` | Total number of RPC calls that are successfully processed |
8893
| `NumOpenConnections` | Current number of open connections |
8994
| `NumInProcessHandler` | Current number of handlers busy processing requests |
@@ -107,6 +112,18 @@ The default timeunit used for RPC metrics is milliseconds (as per the below desc
107112
| `rpcLockWaitTime`*num*`s90thPercentileLatency` | Shows the 90th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
108113
| `rpcLockWaitTime`*num*`s95thPercentileLatency` | Shows the 95th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
109114
| `rpcLockWaitTime`*num*`s99thPercentileLatency` | Shows the 99th percentile of RPC lock wait time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
115+
| `rpcResponseTime`*num*`sNumOps` | Shows total number of RPC calls (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
116+
| `rpcResponseTime`*num*`s50thPercentileLatency` | Shows the 50th percentile of RPC response time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
117+
| `rpcResponseTime`*num*`s75thPercentileLatency` | Shows the 75th percentile of RPC response time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
118+
| `rpcResponseTime`*num*`s90thPercentileLatency` | Shows the 90th percentile of RPC response time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
119+
| `rpcResponseTime`*num*`s95thPercentileLatency` | Shows the 95th percentile of RPC response time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
120+
| `rpcResponseTime`*num*`s99thPercentileLatency` | Shows the 99th percentile of RPC response time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
121+
| `deferredRpcProcessingTime`*num*`sNumOps` | Shows total number of Deferred RPC calls (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
122+
| `deferredRpcProcessingTime`*num*`s50thPercentileLatency` | Shows the 50th percentile of Deferred RPC processing time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
123+
| `deferredRpcProcessingTime`*num*`s75thPercentileLatency` | Shows the 75th percentile of Deferred RPC processing time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
124+
| `deferredRpcProcessingTime`*num*`s90thPercentileLatency` | Shows the 90th percentile of Deferred RPC processing time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
125+
| `deferredRpcProcessingTime`*num*`s95thPercentileLatency` | Shows the 95th percentile of Deferred RPC processing time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
126+
| `deferredRpcProcessingTime`*num*`s99thPercentileLatency` | Shows the 99th percentile of Deferred RPC processing time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
110127
| `TotalRequests` | Total num of requests served by the RPC server. |
111128
| `TotalRequestsPerSeconds` | Total num of requests per second served by the RPC server. |
112129

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,10 +22,15 @@
2222
import static org.junit.Assert.assertNull;
2323
import static org.junit.Assert.assertTrue;
2424

25+
import java.util.ArrayList;
2526
import java.util.Arrays;
2627
import java.util.List;
2728

2829
import javax.security.auth.login.AppConfigurationEntry;
30+
31+
import org.apache.curator.framework.CuratorFramework;
32+
import org.apache.curator.framework.CuratorFrameworkFactory;
33+
import org.apache.curator.retry.RetryNTimes;
2934
import org.apache.curator.test.TestingServer;
3035
import org.apache.hadoop.conf.Configuration;
3136
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -193,6 +198,36 @@ public void testJaasConfiguration() throws Exception {
193198
}
194199
}
195200

201+
@Test
202+
public void testCuratorFrameworkFactory() throws Exception{
203+
// By not explicitly calling the NewZooKeeper method validate that the Curator override works.
204+
ZKClientConfig zkClientConfig = new ZKClientConfig();
205+
Configuration conf = new Configuration();
206+
conf.set(CommonConfigurationKeys.ZK_ADDRESS, this.server.getConnectString());
207+
int numRetries = conf.getInt(CommonConfigurationKeys.ZK_NUM_RETRIES,
208+
CommonConfigurationKeys.ZK_NUM_RETRIES_DEFAULT);
209+
int zkSessionTimeout = conf.getInt(CommonConfigurationKeys.ZK_TIMEOUT_MS,
210+
CommonConfigurationKeys.ZK_TIMEOUT_MS_DEFAULT);
211+
int zkRetryInterval = conf.getInt(
212+
CommonConfigurationKeys.ZK_RETRY_INTERVAL_MS,
213+
CommonConfigurationKeys.ZK_RETRY_INTERVAL_MS_DEFAULT);
214+
RetryNTimes retryPolicy = new RetryNTimes(numRetries, zkRetryInterval);
215+
216+
CuratorFramework client = CuratorFrameworkFactory.builder()
217+
.connectString(conf.get(CommonConfigurationKeys.ZK_ADDRESS))
218+
.zkClientConfig(zkClientConfig)
219+
.sessionTimeoutMs(zkSessionTimeout).retryPolicy(retryPolicy)
220+
.authorization(new ArrayList<>())
221+
.zookeeperFactory(new ZKCuratorManager.HadoopZookeeperFactory(
222+
"foo1", "bar1", "bar1.keytab", false,
223+
new ZKCuratorManager.TruststoreKeystore(conf))
224+
225+
).build();
226+
client.start();
227+
validateJaasConfiguration(ZKCuratorManager.HadoopZookeeperFactory.JAAS_CLIENT_ENTRY,
228+
"bar1", "bar1.keytab", client.getZookeeperClient().getZooKeeper());
229+
}
230+
196231
private void validateJaasConfiguration(String clientConfig, String principal, String keytab,
197232
ZooKeeper zk) {
198233
assertEquals("Validate that expected clientConfig is set in ZK config", clientConfig,

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,7 @@ public enum OpType {
7575
GET_STORAGE_POLICIES("op_get_storage_policies"),
7676
GET_STORAGE_POLICY("op_get_storage_policy"),
7777
GET_TRASH_ROOT("op_get_trash_root"),
78+
GET_TRASH_ROOTS("op_get_trash_roots"),
7879
GET_XATTR("op_get_xattr"),
7980
LIST_CACHE_DIRECTIVE("op_list_cache_directive"),
8081
LIST_CACHE_POOL("op_list_cache_pool"),

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3612,6 +3612,8 @@ public Path getTrashRoot(Path path) {
36123612
*/
36133613
@Override
36143614
public Collection<FileStatus> getTrashRoots(boolean allUsers) {
3615+
statistics.incrementReadOps(1);
3616+
storageStatistics.incrementOpCounter(OpType.GET_TRASH_ROOTS);
36153617
Set<FileStatus> ret = new HashSet<>();
36163618
// Get normal trash roots
36173619
ret.addAll(super.getTrashRoots(allUsers));

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222

2323
import org.apache.hadoop.classification.VisibleForTesting;
2424
import org.apache.hadoop.fs.BlockLocation;
25+
import org.apache.hadoop.fs.Path;
2526
import org.apache.hadoop.util.Preconditions;
2627
import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
2728
import org.apache.hadoop.fs.ContentSummary;
@@ -877,6 +878,40 @@ public static Map<String, String> getErasureCodeCodecs(Map<?, ?> json) {
877878
return map;
878879
}
879880

881+
public static Collection<FileStatus> getTrashRoots(Map<?, ?> json) {
882+
List<?> objs = (List<?>) json.get("Paths");
883+
if (objs != null) {
884+
FileStatus[] trashRoots = new FileStatus[objs.size()];
885+
for (int i = 0; i < objs.size(); i++) {
886+
Map<?, ?> m = (Map<?, ?>) objs.get(i);
887+
trashRoots[i] = toFileStatus(m);
888+
}
889+
return Arrays.asList(trashRoots);
890+
}
891+
return new ArrayList<FileStatus>(0);
892+
}
893+
894+
public static FileStatus toFileStatus(Map<?, ?> json) {
895+
Path path = new Path(getString(json, "path", ""));
896+
long length = getLong(json, "length", 0);
897+
boolean isdir = getBoolean(json, "isdir", false);
898+
short replication = (short) getInt(json, "block_replication", -1);
899+
long blockSize = getLong(json, "blocksize", 256);
900+
long modificationTime = getLong(json, "modification_time", 0);
901+
long accessTime = getLong(json, "access_time", 0);
902+
String permString = getString(json, "permission", null);
903+
FsPermission permission = toFsPermission(permString);
904+
String owner = getString(json, "owner", null);
905+
String group = getString(json, "group", null);
906+
if (json.get("symlink") != null) {
907+
Path symlink = new Path((String) json.get("symlink"));
908+
return new FileStatus(length, isdir, replication, blockSize, modificationTime,
909+
accessTime, permission, owner, group, symlink, path);
910+
}
911+
return new FileStatus(length, isdir, replication, blockSize, modificationTime,
912+
accessTime, permission, owner, group, path);
913+
}
914+
880915
private static List<SnapshotDiffReport.DiffReportEntry> toDiffList(
881916
List<?> objs) {
882917
if (objs == null) {

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1956,6 +1956,26 @@ String decodeResponse(Map<?, ?> json) throws IOException {
19561956
}
19571957
}
19581958

1959+
public Collection<FileStatus> getTrashRoots(boolean allUsers) {
1960+
statistics.incrementReadOps(1);
1961+
storageStatistics.incrementOpCounter(OpType.GET_TRASH_ROOTS);
1962+
1963+
final HttpOpParam.Op op = GetOpParam.Op.GETTRASHROOTS;
1964+
try {
1965+
Collection<FileStatus> trashRoots =
1966+
new FsPathResponseRunner<Collection<FileStatus>>(op, null,
1967+
new AllUsersParam(allUsers)) {
1968+
@Override
1969+
Collection<FileStatus> decodeResponse(Map<?, ?> json) throws IOException {
1970+
return JsonUtilClient.getTrashRoots(json);
1971+
}
1972+
}.run();
1973+
return trashRoots;
1974+
} catch (IOException e) {
1975+
return super.getTrashRoots(allUsers);
1976+
}
1977+
}
1978+
19591979
@Override
19601980
public void access(final Path path, final FsAction mode) throws IOException {
19611981
final HttpOpParam.Op op = GetOpParam.Op.CHECKACCESS;
Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
/**
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
*
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
*
12+
* Unless required by applicable law or agreed to in writing, software
13+
* distributed under the License is distributed on an "AS IS" BASIS,
14+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+
* See the License for the specific language governing permissions and
16+
* limitations under the License.
17+
*/
18+
package org.apache.hadoop.hdfs.web.resources;
19+
20+
/** AllUsers parameter. */
21+
public class AllUsersParam extends BooleanParam {
22+
/** Parameter name. */
23+
public static final String NAME = "allusers";
24+
/** Default parameter value. */
25+
public static final String DEFAULT = FALSE;
26+
27+
private static final Domain DOMAIN = new Domain(NAME);
28+
29+
/**
30+
* Constructor.
31+
* @param value the parameter value.
32+
*/
33+
public AllUsersParam(final Boolean value) {
34+
super(DOMAIN, value);
35+
}
36+
37+
/**
38+
* Constructor.
39+
* @param str a string representation of the parameter value.
40+
*/
41+
public AllUsersParam(final String str) {
42+
this(DOMAIN.parse(str));
43+
}
44+
45+
@Override
46+
public String getName() {
47+
return NAME;
48+
}
49+
}

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -69,6 +69,7 @@ public enum Op implements HttpOpParam.Op {
6969
GETSTATUS(false, HttpURLConnection.HTTP_OK),
7070
GETECPOLICIES(false, HttpURLConnection.HTTP_OK),
7171
GETECCODECS(false, HttpURLConnection.HTTP_OK),
72+
GETTRASHROOTS(false, HttpURLConnection.HTTP_OK),
7273
GETSNAPSHOTLIST(false, HttpURLConnection.HTTP_OK);
7374

7475
final boolean redirect;

0 commit comments

Comments
 (0)