Commit e6547fb

Address the comments

1 parent c912a2a · commit e6547fb

File tree

4 files changed: +9 −8 lines changed


yarn/src/main/scala/org/apache/spark/deploy/yarn/ClientArguments.scala

Lines changed: 1 addition & 1 deletion
@@ -81,7 +81,7 @@ private[spark] class ClientArguments(args: Array[String], sparkConf: SparkConf)
       .orNull
     // If dynamic allocation is enabled, start at the configured initial number of executors.
     // Default to minExecutors if no initialExecutors is set.
-    numExecutors = YarnSparkHadoopUtil.getTargetExecutorNumber(sparkConf)
+    numExecutors = YarnSparkHadoopUtil.getInitialTargetExecutorNumber(sparkConf)
     principal = Option(principal)
       .orElse(sparkConf.getOption("spark.yarn.principal"))
       .orNull

yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala

Lines changed: 2 additions & 1 deletion
@@ -88,7 +88,8 @@ private[yarn] class YarnAllocator(
   private var executorIdCounter = 0
   @volatile private var numExecutorsFailed = 0

-  @volatile private var targetNumExecutors = YarnSparkHadoopUtil.getTargetExecutorNumber(sparkConf)
+  @volatile private var targetNumExecutors =
+    YarnSparkHadoopUtil.getInitialTargetExecutorNumber(sparkConf)

   // Executor loss reason requests that are pending - maps from executor ID for inquiry to a
   // list of requesters that should be responded to once we find out why the given executor

yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala

Lines changed: 5 additions & 5 deletions
@@ -316,7 +316,7 @@ object YarnSparkHadoopUtil {
   }

   /** Get the initial target number of executors depends on dynamic allocation is enabled or not */
-  def getTargetExecutorNumber(conf: SparkConf): Int = {
+  def getInitialTargetExecutorNumber(conf: SparkConf): Int = {
     if (Utils.isDynamicAllocationEnabled(conf)) {
       val minNumExecutors = conf.getInt("spark.dynamicAllocation.minExecutors", 0)
       val initialNumExecutors =
@@ -328,10 +328,10 @@

       initialNumExecutors
     } else {
-      var targetNumExecutors = DEFAULT_NUMBER_EXECUTORS
-      if (System.getenv("SPARK_EXECUTOR_INSTANCES") != null) {
-        targetNumExecutors = IntParam.unapply(System.getenv("SPARK_EXECUTOR_INSTANCES"))
-          .getOrElse(targetNumExecutors)
+      val targetNumExecutors = if (System.getenv("SPARK_EXECUTOR_INSTANCES") != null) {
+        IntParam.unapply(System.getenv("SPARK_EXECUTOR_INSTANCES")).get
+      } else {
+        DEFAULT_NUMBER_EXECUTORS
       }
       // System property can override environment variable.
       conf.getInt("spark.executor.instances", targetNumExecutors)

yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterSchedulerBackend.scala

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ private[spark] class YarnClusterSchedulerBackend(

   override def start() {
     super.start()
-    totalExpectedExecutors = YarnSparkHadoopUtil.getTargetExecutorNumber(sc.conf)
+    totalExpectedExecutors = YarnSparkHadoopUtil.getInitialTargetExecutorNumber(sc.conf)
   }

   override def applicationId(): String =
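
For context, a hypothetical configuration showing which settings the shared helper consults when dynamic allocation is enabled; the value 5 is purely illustrative, and the commented call requires the yarn module on the classpath.

import org.apache.spark.SparkConf

// Illustrative settings only. With dynamic allocation enabled, each call site touched by
// this commit (ClientArguments, YarnAllocator, YarnClusterSchedulerBackend) starts from
// the configured initial number of executors, i.e. 5 here, not spark.executor.instances.
val conf = new SparkConf()
  .set("spark.dynamicAllocation.enabled", "true")
  .set("spark.dynamicAllocation.minExecutors", "1")
  .set("spark.dynamicAllocation.initialExecutors", "5")

// YarnSparkHadoopUtil.getInitialTargetExecutorNumber(conf)  // => 5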

0 commit comments
