diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
index e60be5d5a651f..30b542eefb60b 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
@@ -715,7 +715,7 @@ private[spark] class SparkSubmit extends Logging {
       if (opt.value != null &&
           (deployMode & opt.deployMode) != 0 &&
           (clusterManager & opt.clusterManager) != 0) {
-        if (opt.clOption != null) { childArgs += (opt.clOption, opt.value) }
+        if (opt.clOption != null) { childArgs += opt.clOption += opt.value }
         if (opt.confKey != null) {
           if (opt.mergeFn.isDefined && sparkConf.contains(opt.confKey)) {
             sparkConf.set(opt.confKey, opt.mergeFn.get.apply(sparkConf.get(opt.confKey), opt.value))
@@ -747,15 +747,15 @@ private[spark] class SparkSubmit extends Logging {
     if (args.isStandaloneCluster) {
       if (args.useRest) {
         childMainClass = REST_CLUSTER_SUBMIT_CLASS
-        childArgs += (args.primaryResource, args.mainClass)
+        childArgs += args.primaryResource += args.mainClass
       } else {
         // In legacy standalone cluster mode, use Client as a wrapper around the user class
         childMainClass = STANDALONE_CLUSTER_SUBMIT_CLASS
         if (args.supervise) { childArgs += "--supervise" }
-        Option(args.driverMemory).foreach { m => childArgs += ("--memory", m) }
-        Option(args.driverCores).foreach { c => childArgs += ("--cores", c) }
+        Option(args.driverMemory).foreach { m => childArgs += "--memory" += m }
+        Option(args.driverCores).foreach { c => childArgs += "--cores" += c }
         childArgs += "launch"
-        childArgs += (args.master, args.primaryResource, args.mainClass)
+        childArgs += args.master += args.primaryResource += args.mainClass
       }
       if (args.childArgs != null) {
         childArgs ++= args.childArgs
@@ -777,20 +777,20 @@ private[spark] class SparkSubmit extends Logging {
     if (isYarnCluster) {
       childMainClass = YARN_CLUSTER_SUBMIT_CLASS
       if (args.isPython) {
-        childArgs += ("--primary-py-file", args.primaryResource)
-        childArgs += ("--class", "org.apache.spark.deploy.PythonRunner")
+        childArgs += "--primary-py-file" += args.primaryResource
+        childArgs += "--class" += "org.apache.spark.deploy.PythonRunner"
       } else if (args.isR) {
         val mainFile = new Path(args.primaryResource).getName
-        childArgs += ("--primary-r-file", mainFile)
-        childArgs += ("--class", "org.apache.spark.deploy.RRunner")
+        childArgs += "--primary-r-file" += mainFile
+        childArgs += "--class" += "org.apache.spark.deploy.RRunner"
       } else {
         if (args.primaryResource != SparkLauncher.NO_RESOURCE) {
-          childArgs += ("--jar", args.primaryResource)
+          childArgs += "--jar" += args.primaryResource
         }
-        childArgs += ("--class", args.mainClass)
+        childArgs += "--class" += args.mainClass
       }
       if (args.childArgs != null) {
-        args.childArgs.foreach { arg => childArgs += ("--arg", arg) }
+        args.childArgs.foreach { arg => childArgs += "--arg" += arg }
       }
     }

@@ -813,12 +813,12 @@ private[spark] class SparkSubmit extends Logging {
       }
       if (args.childArgs != null) {
         args.childArgs.foreach { arg =>
-          childArgs += ("--arg", arg)
+          childArgs += "--arg" += arg
         }
       }
       // Pass the proxyUser to the k8s app so it is possible to add it to the driver args
       if (args.proxyUser != null) {
-        childArgs += ("--proxy-user", args.proxyUser)
+        childArgs += "--proxy-user" += args.proxyUser
       }
     }

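Note: the SparkSubmit changes above all follow one pattern. The varargs form `childArgs += (a, b)` is deprecated on `Growable` in Scala 2.13, and since `+=` returns the buffer itself, consecutive appends can simply be chained. A minimal, self-contained sketch of that pattern (the buffer name and argument values below are illustrative, not taken from Spark):

    import scala.collection.mutable.ArrayBuffer

    object ChainedAppendSketch {
      def main(args: Array[String]): Unit = {
        val childArgs = new ArrayBuffer[String]()

        // `+=` (addOne) returns this.type, so the call parses as
        // (childArgs += "--class") += "org.example.Main" and appends both elements.
        childArgs += "--class" += "org.example.Main"

        // Equivalent spelling when several elements are added at once.
        childArgs ++= Seq("--arg", "value1")

        println(childArgs.mkString(" ")) // --class org.example.Main --arg value1
      }
    }

Both spellings compile cleanly on Scala 2.12 and 2.13; the chained form keeps the call sites closest to their original shape.
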
diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/CommandUtils.scala b/core/src/main/scala/org/apache/spark/deploy/worker/CommandUtils.scala
index c04214de4ddc6..d1190ca46c2a8 100644
--- a/core/src/main/scala/org/apache/spark/deploy/worker/CommandUtils.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/worker/CommandUtils.scala
@@ -81,14 +81,15 @@ object CommandUtils extends Logging {

     var newEnvironment = if (libraryPathEntries.nonEmpty && libraryPathName.nonEmpty) {
       val libraryPaths = libraryPathEntries ++ cmdLibraryPath ++ env.get(libraryPathName)
-      command.environment + ((libraryPathName, libraryPaths.mkString(File.pathSeparator)))
+      command.environment ++ Map(libraryPathName -> libraryPaths.mkString(File.pathSeparator))
     } else {
       command.environment
     }

     // set auth secret to env variable if needed
     if (securityMgr.isAuthenticationEnabled()) {
-      newEnvironment += (SecurityManager.ENV_AUTH_SECRET -> securityMgr.getSecretKey())
+      newEnvironment = newEnvironment ++
+        Map(SecurityManager.ENV_AUTH_SECRET -> securityMgr.getSecretKey())
     }
     // set SSL env variables if needed
     newEnvironment ++= securityMgr.getEnvironmentForSslRpcPasswords
diff --git a/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala b/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
index 3defd4b1a7d90..948bc8889bcd1 100644
--- a/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
@@ -626,7 +626,7 @@ class JsonProtocolSuite extends SparkFunSuite {
     val expectedEvent: SparkListenerEnvironmentUpdate = {
       val e = JsonProtocol.environmentUpdateFromJson(environmentUpdateJsonString)
       e.copy(environmentDetails =
-        e.environmentDetails + ("Metrics Properties" -> Seq.empty[(String, String)]))
+        e.environmentDetails ++ Map("Metrics Properties" -> Seq.empty[(String, String)]))
     }
     val oldEnvironmentUpdateJson = environmentUpdateJsonString
       .removeField("Metrics Properties")
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala
index 717c620f5c341..af41d30c2cdb8 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala
@@ -53,7 +53,7 @@ private[spark] class YarnClientSchedulerBackend(
     sc.ui.foreach { ui => conf.set(DRIVER_APP_UI_ADDRESS, ui.webUrl) }

     val argsArrayBuf = new ArrayBuffer[String]()
-    argsArrayBuf += ("--arg", hostport)
+    argsArrayBuf += "--arg" += hostport

     logDebug("ClientArguments called with: " + argsArrayBuf.mkString(" "))
     val args = new ClientArguments(argsArrayBuf.toArray)
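Note: the CommandUtils and JsonProtocolSuite hunks replace `map + (key -> value)` with `map ++ Map(key -> value)`. Those call sites appear to be typed against the general `scala.collection.Map`, where the copying `+` is deprecated in Scala 2.13 while `++` (concat) remains available on all map flavors. A rough sketch under that assumption (names and values below are illustrative):

    import scala.collection.{Map => GenMap}

    object MapConcatSketch {
      def main(args: Array[String]): Unit = {
        // A value typed as the general scala.collection.Map.
        val environment: GenMap[String, String] = Map("SPARK_HOME" -> "/opt/spark")

        // `environment + ("LD_LIBRARY_PATH" -> ...)` triggers a deprecation warning on
        // this type under 2.13; concatenating a single-entry Map expresses the same
        // update, and the explicit form also works when the result is reassigned to a
        // var, as in `newEnvironment = newEnvironment ++ Map(...)` above.
        val withLibraryPath = environment ++ Map("LD_LIBRARY_PATH" -> "/opt/native/lib")

        println(withLibraryPath)
      }
    }
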
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/JoinEstimation.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/JoinEstimation.scala
index c6e76df1b31ad..10646130a9106 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/JoinEstimation.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/JoinEstimation.scala
@@ -206,13 +206,12 @@ case class JoinEstimation(join: Join) extends Logging {
           case _ =>
             computeByNdv(leftKey, rightKey, newMin, newMax)
         }
-        keyStatsAfterJoin += (
+        keyStatsAfterJoin +=
           // Histograms are propagated as unchanged. During future estimation, they should be
           // truncated by the updated max/min. In this way, only pointers of the histograms are
           // propagated and thus reduce memory consumption.
-          leftKey -> joinStat.copy(histogram = leftKeyStat.histogram),
-          rightKey -> joinStat.copy(histogram = rightKeyStat.histogram)
-        )
+          (leftKey -> joinStat.copy(histogram = leftKeyStat.histogram)) +=
+          (rightKey -> joinStat.copy(histogram = rightKeyStat.histogram))
         // Return cardinality estimated from the most selective join keys.
         if (card < joinCard) joinCard = card
       } else {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DescribeNamespaceExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DescribeNamespaceExec.scala
index 125952566d7e8..d97ffb6940600 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DescribeNamespaceExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DescribeNamespaceExec.scala
@@ -46,12 +46,12 @@ case class DescribeNamespaceExec(
     }

     if (isExtended) {
-      val properties = metadata.asScala -- CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES
+      val properties = metadata.asScala.toMap -- CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES
       val propertiesStr = if (properties.isEmpty) {
         ""
       } else {
-        conf.redactOptions(properties.toMap).toSeq.sortBy(_._1).mkString("(", ", ", ")")
+        conf.redactOptions(properties).toSeq.sortBy(_._1).mkString("(", ", ", ")")
       }
       rows += toCatalystRow("Properties", propertiesStr)
     }
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalogSuite.scala
index c43658eacabc2..f9da55ed6ba31 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalogSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalogSuite.scala
@@ -827,7 +827,7 @@ class V2SessionCatalogNamespaceSuite extends V2SessionCatalogBaseSuite {
     // remove location and comment that are automatically added by HMS unless they are expected
     val toRemove =
       CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES.filter(expected.contains)
-    assert(expected -- toRemove === actual)
+    assert(expected.toMap -- toRemove === actual)
   }

   test("listNamespaces: basic behavior") {
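Note: the last two hunks insert `.toMap` before `--`. `metadata.asScala` (and the `expected` map in the test) expose the general or mutable Map interface, so the change routes key removal through an immutable `Map`, whose `--` (`removedAll`) is the supported operation in Scala 2.13. A small sketch of the same idea; the property names stand in for `CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES`, and the converter import shown is the 2.13 one (Spark's own files may import differently):

    import java.util.{HashMap => JHashMap}
    import scala.jdk.CollectionConverters._

    object DropReservedPropertiesSketch {
      def main(args: Array[String]): Unit = {
        val metadata = new JHashMap[String, String]()
        metadata.put("location", "/tmp/ns")
        metadata.put("comment", "demo namespace")
        metadata.put("owner", "spark")

        // asScala wraps the Java map as a mutable Scala Map; toMap copies it into an
        // immutable Map so that `--` (removedAll) can drop the reserved keys.
        val reserved = Seq("location", "comment")
        val properties = metadata.asScala.toMap -- reserved

        println(properties) // Map(owner -> spark)
      }
    }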