
Commit 0332063

[SPARK-20410][SQL] Make sparkConf a def in SharedSQLContext
## What changes were proposed in this pull request?

It is kind of annoying that `SharedSQLContext.sparkConf` is a val when overriding test cases, because you cannot call `super` on it. This PR makes it a function.

## How was this patch tested?

Existing tests.

Author: Herman van Hovell <[email protected]>

Closes #17705 from hvanhovell/SPARK-20410.
1 parent d95e4d9 commit 0332063
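For context, a minimal sketch of why the change helps (the `BaseSuite` and `MySuite` names below are hypothetical, used only for illustration and not part of this patch): Scala does not let `super` select a `val`, so with the old val-based `sparkConf` an overriding suite could only mutate the shared conf from `beforeAll()`, whereas a `def` can be overridden and chained through `super.sparkConf`.

```scala
import org.apache.spark.SparkConf

trait BaseSuite {
  // Old shape (before this patch): `protected val sparkConf = new SparkConf()`.
  // A subclass cannot write `super.sparkConf` for a val, so it had to mutate the
  // conf in beforeAll() instead of extending it.

  // New shape: a def can be overridden and composed with `super`.
  protected def sparkConf: SparkConf = new SparkConf()
}

trait MySuite extends BaseSuite {
  // Extend the parent configuration rather than replacing or mutating it.
  override protected def sparkConf: SparkConf =
    super.sparkConf.set("spark.sql.codegen.fallback", "false")
}
```

This is the pattern the updated test suites in the diff below adopt.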

7 files changed: +32 -43 lines changed

sql/core/src/test/scala/org/apache/spark/sql/AggregateHashMapSuite.scala

Lines changed: 15 additions & 20 deletions
@@ -19,13 +19,12 @@ package org.apache.spark.sql
 
 import org.scalatest.BeforeAndAfter
 
-class SingleLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
+import org.apache.spark.SparkConf
 
-  protected override def beforeAll(): Unit = {
-    sparkConf.set("spark.sql.codegen.fallback", "false")
-    sparkConf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
-    super.beforeAll()
-  }
+class SingleLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
+  override protected def sparkConf: SparkConf = super.sparkConf
+    .set("spark.sql.codegen.fallback", "false")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
 
   // adding some checking after each test is run, assuring that the configs are not changed
   // in test code
@@ -38,12 +37,9 @@ class SingleLevelAggregateHashMapSuite extends DataFrameAggregateSuite with Befo
 }
 
 class TwoLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
-
-  protected override def beforeAll(): Unit = {
-    sparkConf.set("spark.sql.codegen.fallback", "false")
-    sparkConf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
-    super.beforeAll()
-  }
+  override protected def sparkConf: SparkConf = super.sparkConf
+    .set("spark.sql.codegen.fallback", "false")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
 
   // adding some checking after each test is run, assuring that the configs are not changed
   // in test code
@@ -55,15 +51,14 @@ class TwoLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeA
   }
 }
 
-class TwoLevelAggregateHashMapWithVectorizedMapSuite extends DataFrameAggregateSuite with
-    BeforeAndAfter {
+class TwoLevelAggregateHashMapWithVectorizedMapSuite
+  extends DataFrameAggregateSuite
+  with BeforeAndAfter {
 
-  protected override def beforeAll(): Unit = {
-    sparkConf.set("spark.sql.codegen.fallback", "false")
-    sparkConf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
-    sparkConf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
-    super.beforeAll()
-  }
+  override protected def sparkConf: SparkConf = super.sparkConf
+    .set("spark.sql.codegen.fallback", "false")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+    .set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
 
   // adding some checking after each test is run, assuring that the configs are not changed
   // in test code

sql/core/src/test/scala/org/apache/spark/sql/DatasetSerializerRegistratorSuite.scala

Lines changed: 5 additions & 7 deletions
@@ -20,22 +20,20 @@ package org.apache.spark.sql
 import com.esotericsoftware.kryo.{Kryo, Serializer}
 import com.esotericsoftware.kryo.io.{Input, Output}
 
+import org.apache.spark.SparkConf
 import org.apache.spark.serializer.KryoRegistrator
 import org.apache.spark.sql.test.SharedSQLContext
-import org.apache.spark.sql.test.TestSparkSession
 
 /**
  * Test suite to test Kryo custom registrators.
  */
 class DatasetSerializerRegistratorSuite extends QueryTest with SharedSQLContext {
   import testImplicits._
 
-  /**
-   * Initialize the [[TestSparkSession]] with a [[KryoRegistrator]].
-   */
-  protected override def beforeAll(): Unit = {
-    sparkConf.set("spark.kryo.registrator", TestRegistrator().getClass.getCanonicalName)
-    super.beforeAll()
+
+  override protected def sparkConf: SparkConf = {
+    // Make sure we use the KryoRegistrator
+    super.sparkConf.set("spark.kryo.registrator", TestRegistrator().getClass.getCanonicalName)
   }
 
   test("Kryo registrator") {

sql/core/src/test/scala/org/apache/spark/sql/execution/DataSourceScanExecRedactionSuite.scala

Lines changed: 3 additions & 8 deletions
@@ -18,22 +18,17 @@ package org.apache.spark.sql.execution
 
 import org.apache.hadoop.fs.Path
 
+import org.apache.spark.SparkConf
 import org.apache.spark.sql.QueryTest
 import org.apache.spark.sql.test.SharedSQLContext
-import org.apache.spark.util.Utils
 
 /**
  * Suite that tests the redaction of DataSourceScanExec
 */
 class DataSourceScanExecRedactionSuite extends QueryTest with SharedSQLContext {
 
-  import Utils._
-
-  override def beforeAll(): Unit = {
-    sparkConf.set("spark.redaction.string.regex",
-      "file:/[\\w_]+")
-    super.beforeAll()
-  }
+  override protected def sparkConf: SparkConf = super.sparkConf
+    .set("spark.redaction.string.regex", "file:/[\\w_]+")
 
   test("treeString is redacted") {
     withTempDir { dir =>

sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@ import org.apache.spark.util.Utils
 class FileSourceStrategySuite extends QueryTest with SharedSQLContext with PredicateHelper {
   import testImplicits._
 
-  protected override val sparkConf = new SparkConf().set("spark.default.parallelism", "1")
+  protected override def sparkConf = super.sparkConf.set("spark.default.parallelism", "1")
 
   test("unpartitioned table, single partition") {
     val table =

sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLogSuite.scala

Lines changed: 2 additions & 2 deletions
@@ -28,8 +28,8 @@ import org.apache.spark.sql.test.SharedSQLContext
 class CompactibleFileStreamLogSuite extends SparkFunSuite with SharedSQLContext {
 
   /** To avoid caching of FS objects */
-  override protected val sparkConf =
-    new SparkConf().set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
+  override protected def sparkConf =
+    super.sparkConf.set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
 
   import CompactibleFileStreamLog._

sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala

Lines changed: 2 additions & 2 deletions
@@ -38,8 +38,8 @@ import org.apache.spark.util.UninterruptibleThread
 class HDFSMetadataLogSuite extends SparkFunSuite with SharedSQLContext {
 
   /** To avoid caching of FS objects */
-  override protected val sparkConf =
-    new SparkConf().set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
+  override protected def sparkConf =
+    super.sparkConf.set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
 
   private implicit def toOption[A](a: A): Option[A] = Option(a)

sql/core/src/test/scala/org/apache/spark/sql/test/SharedSQLContext.scala

Lines changed: 4 additions & 3 deletions
@@ -30,7 +30,9 @@ import org.apache.spark.sql.{SparkSession, SQLContext}
  */
 trait SharedSQLContext extends SQLTestUtils with BeforeAndAfterEach with Eventually {
 
-  protected val sparkConf = new SparkConf()
+  protected def sparkConf = {
+    new SparkConf().set("spark.hadoop.fs.file.impl", classOf[DebugFilesystem].getName)
+  }
 
   /**
    * The [[TestSparkSession]] to use for all tests in this suite.
@@ -51,8 +53,7 @@ trait SharedSQLContext extends SQLTestUtils with BeforeAndAfterEach with Eventua
   protected implicit def sqlContext: SQLContext = _spark.sqlContext
 
   protected def createSparkSession: TestSparkSession = {
-    new TestSparkSession(
-      sparkConf.set("spark.hadoop.fs.file.impl", classOf[DebugFilesystem].getName))
+    new TestSparkSession(sparkConf)
   }
 
   /**
