
Commit 0827ede

Address comments

1 parent bc4d71d commit 0827ede

9 files changed: +31 -73 lines

sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/PlanTest.scala

Lines changed: 1 addition & 2 deletions
@@ -22,7 +22,6 @@ import org.scalatest.Suite
 import org.scalatest.Tag
 
 import org.apache.spark.SparkFunSuite
-import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.catalyst.analysis.SimpleAnalyzer
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode
@@ -57,7 +56,7 @@ trait CodegenInterpretedPlanTest extends PlanTest
  * Provides helper methods for comparing plans, but without the overhead of
  * mandating a FunSuite.
  */
-trait PlanTestBase extends PredicateHelper with SupportWithSQLConf { self: Suite =>
+trait PlanTestBase extends PredicateHelper with SQLHelper { self: Suite =>
 
   // TODO(gatorsmile): remove this from PlanTest and all the analyzer rules
   protected def conf = SQLConf.get
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/{SupportWithSQLConf.scala → SQLHelper.scala}

Lines changed: 14 additions & 1 deletion

@@ -16,10 +16,13 @@
  */
 package org.apache.spark.sql.catalyst.plans
 
+import java.io.File
+
 import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.util.Utils
 
-trait SupportWithSQLConf {
+trait SQLHelper {
 
   /**
    * Sets all SQL configurations specified in `pairs`, calls `f`, and then restores all SQL
@@ -48,4 +51,14 @@ trait SupportWithSQLConf {
       }
     }
   }
+
+  /**
+   * Generates a temporary path without creating the actual file/directory, then passes it to
+   * `f`. If a file/directory is created there by `f`, it will be deleted after `f` returns.
+   */
+  protected def withTempPath(f: File => Unit): Unit = {
+    val path = Utils.createTempDir()
+    path.delete()
+    try f(path) finally Utils.deleteRecursively(path)
+  }
 }
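
For reference, a minimal sketch of how the consolidated helpers might be used after this change. The object name, master URL, and dataset are illustrative and not part of this commit; it assumes `withSQLConf` keeps the `(key -> value)*` signature described in the doc comment above.

import java.io.File

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.plans.SQLHelper
import org.apache.spark.sql.internal.SQLConf

// Hypothetical example object, not part of this commit.
object SQLHelperExample extends SQLHelper {
  private val spark = SparkSession.builder()
    .master("local[1]")
    .appName("SQLHelperExample")
    .getOrCreate()

  def main(args: Array[String]): Unit = {
    // withTempPath hands `f` a path that does not exist yet and recursively
    // deletes whatever `f` creates there once `f` returns.
    withTempPath { dir =>
      // withSQLConf sets the given entries, runs the block, and restores the
      // previous values even if the block throws.
      withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
        spark.range(100).write.parquet(dir.getCanonicalPath)
        assert(spark.read.parquet(dir.getCanonicalPath).count() == 100)
      }
    }
  }
}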

sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala

Lines changed: 3 additions & 10 deletions
@@ -19,26 +19,25 @@ package org.apache.spark.sql.execution.benchmark
 import java.io.File
 
 import scala.collection.JavaConverters._
-import scala.util.{Random, Try}
+import scala.util.Random
 
 import org.apache.spark.SparkConf
 import org.apache.spark.benchmark.Benchmark
 import org.apache.spark.sql.{DataFrame, DataFrameWriter, Row, SparkSession}
 import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.plans.SupportWithSQLConf
+import org.apache.spark.sql.catalyst.plans.SQLHelper
 import org.apache.spark.sql.execution.datasources.parquet.{SpecificParquetRecordReaderBase, VectorizedParquetRecordReader}
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types._
 import org.apache.spark.sql.vectorized.ColumnVector
-import org.apache.spark.util.Utils
 
 
 /**
  * Benchmark to measure data source read performance.
  * To run this:
  *   spark-submit --class <this class> <spark sql test jar>
  */
-object DataSourceReadBenchmark extends SupportWithSQLConf {
+object DataSourceReadBenchmark extends SQLHelper {
   val conf = new SparkConf()
     .setAppName("DataSourceReadBenchmark")
     // Since `spark.master` always exists, overrides this value
@@ -55,12 +54,6 @@ object DataSourceReadBenchmark extends SupportWithSQLConf {
   spark.conf.set(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key, "true")
   spark.conf.set(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, "true")
 
-  def withTempPath(f: File => Unit): Unit = {
-    val path = Utils.createTempDir()
-    path.delete()
-    try f(path) finally Utils.deleteRecursively(path)
-  }
-
   def withTempTable(tableNames: String*)(f: => Unit): Unit = {
     try f finally tableNames.foreach(spark.catalog.dropTempView)
   }
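
As a sketch of how `withTempPath` and `withTempTable` compose inside such a benchmark, with the method body invented for illustration and `spark` being the session defined in the object above:

// Hypothetical benchmark case, not part of this commit.
def parquetScanBenchmark(values: Int): Unit = {
  withTempPath { dir =>
    withTempTable("t1") {
      // Write a scratch dataset, register it as a temp view, and rely on
      // withTempTable to drop the view once the measurement finishes.
      spark.range(values).write.parquet(dir.getCanonicalPath)
      spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("t1")
      spark.sql("SELECT sum(id) FROM t1").collect()
    }
  }
}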

sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala

Lines changed: 3 additions & 10 deletions
@@ -19,17 +19,16 @@ package org.apache.spark.sql.execution.benchmark
 
 import java.io.File
 
-import scala.util.{Random, Try}
+import scala.util.Random
 
 import org.apache.spark.SparkConf
 import org.apache.spark.benchmark.{Benchmark, BenchmarkBase}
 import org.apache.spark.sql.{DataFrame, SparkSession}
-import org.apache.spark.sql.catalyst.plans.SupportWithSQLConf
+import org.apache.spark.sql.catalyst.plans.SQLHelper
 import org.apache.spark.sql.functions.monotonically_increasing_id
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.internal.SQLConf.ParquetOutputTimestampType
 import org.apache.spark.sql.types.{ByteType, Decimal, DecimalType, TimestampType}
-import org.apache.spark.util.Utils
 
 /**
  * Benchmark to measure read performance with Filter pushdown.
@@ -41,7 +40,7 @@ import org.apache.spark.util.Utils
  *   Results will be written to "benchmarks/FilterPushdownBenchmark-results.txt".
  * }}}
  */
-object FilterPushdownBenchmark extends BenchmarkBase with SupportWithSQLConf {
+object FilterPushdownBenchmark extends BenchmarkBase with SQLHelper {
 
   private val conf = new SparkConf()
     .setAppName(this.getClass.getSimpleName)
@@ -61,12 +60,6 @@ object FilterPushdownBenchmark extends BenchmarkBase with SupportWithSQLConf {
 
   private val spark = SparkSession.builder().config(conf).getOrCreate()
 
-  def withTempPath(f: File => Unit): Unit = {
-    val path = Utils.createTempDir()
-    path.delete()
-    try f(path) finally Utils.deleteRecursively(path)
-  }
-
   def withTempTable(tableNames: String*)(f: => Unit): Unit = {
     try f finally tableNames.foreach(spark.catalog.dropTempView)
   }

sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVBenchmarks.scala

Lines changed: 2 additions & 10 deletions
@@ -16,21 +16,19 @@
  */
 package org.apache.spark.sql.execution.datasources.csv
 
-import java.io.File
-
 import org.apache.spark.SparkConf
 import org.apache.spark.benchmark.Benchmark
 import org.apache.spark.sql.{Column, Row, SparkSession}
+import org.apache.spark.sql.catalyst.plans.SQLHelper
 import org.apache.spark.sql.functions.lit
 import org.apache.spark.sql.types._
-import org.apache.spark.util.Utils
 
 /**
  * Benchmark to measure CSV read/write performance.
  * To run this:
  *   spark-submit --class <this class> --jars <spark sql test jar>
  */
-object CSVBenchmarks {
+object CSVBenchmarks extends SQLHelper {
   val conf = new SparkConf()
 
   val spark = SparkSession.builder
@@ -40,12 +38,6 @@ object CSVBenchmarks {
     .getOrCreate()
   import spark.implicits._
 
-  def withTempPath(f: File => Unit): Unit = {
-    val path = Utils.createTempDir()
-    path.delete()
-    try f(path) finally Utils.deleteRecursively(path)
-  }
-
   def quotedValuesBenchmark(rowsNum: Int, numIters: Int): Unit = {
     val benchmark = new Benchmark(s"Parsing quoted values", rowsNum)
 

sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonBenchmarks.scala

Lines changed: 2 additions & 9 deletions
@@ -21,16 +21,16 @@ import java.io.File
 import org.apache.spark.SparkConf
 import org.apache.spark.benchmark.Benchmark
 import org.apache.spark.sql.{Row, SparkSession}
+import org.apache.spark.sql.catalyst.plans.SQLHelper
 import org.apache.spark.sql.functions.lit
 import org.apache.spark.sql.types._
-import org.apache.spark.util.Utils
 
 /**
  * The benchmarks aims to measure performance of JSON parsing when encoding is set and isn't.
  * To run this:
  *   spark-submit --class <this class> --jars <spark sql test jar>
  */
-object JSONBenchmarks {
+object JSONBenchmarks extends SQLHelper {
   val conf = new SparkConf()
 
   val spark = SparkSession.builder
@@ -40,13 +40,6 @@ object JSONBenchmarks {
     .getOrCreate()
   import spark.implicits._
 
-  def withTempPath(f: File => Unit): Unit = {
-    val path = Utils.createTempDir()
-    path.delete()
-    try f(path) finally Utils.deleteRecursively(path)
-  }
-
-
   def schemaInferring(rowsNum: Int): Unit = {
     val benchmark = new Benchmark("JSON schema inferring", rowsNum)
 
sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/CheckpointFileManagerSuite.scala

Lines changed: 2 additions & 8 deletions
@@ -25,12 +25,12 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs._
 
 import org.apache.spark.SparkFunSuite
+import org.apache.spark.sql.catalyst.plans.SQLHelper
 import org.apache.spark.sql.catalyst.util.quietly
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.SharedSparkSession
-import org.apache.spark.util.Utils
 
-abstract class CheckpointFileManagerTests extends SparkFunSuite {
+abstract class CheckpointFileManagerTests extends SparkFunSuite with SQLHelper {
 
   def createManager(path: Path): CheckpointFileManager
 
@@ -88,12 +88,6 @@ abstract class CheckpointFileManagerTests extends SparkFunSuite {
       fm.delete(path) // should not throw exception
     }
   }
-
-  protected def withTempPath(f: File => Unit): Unit = {
-    val path = Utils.createTempDir()
-    path.delete()
-    try f(path) finally Utils.deleteRecursively(path)
-  }
 }
 
 class CheckpointFileManagerSuite extends SparkFunSuite with SharedSparkSession {

sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala

Lines changed: 0 additions & 13 deletions
@@ -40,7 +40,6 @@ import org.apache.spark.sql.catalyst.plans.PlanTestBase
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.catalyst.util._
 import org.apache.spark.sql.execution.FilterExec
-import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.util.UninterruptibleThread
 import org.apache.spark.util.Utils
 
@@ -167,18 +166,6 @@ private[sql] trait SQLTestUtilsBase
     super.withSQLConf(pairs: _*)(f)
   }
 
-  /**
-   * Generates a temporary path without creating the actual file/directory, then pass it to `f`. If
-   * a file/directory is created there by `f`, it will be delete after `f` returns.
-   *
-   * @todo Probably this method should be moved to a more general place
-   */
-  protected def withTempPath(f: File => Unit): Unit = {
-    val path = Utils.createTempDir()
-    path.delete()
-    try f(path) finally Utils.deleteRecursively(path)
-  }
-
   /**
    * Copy file in jar's resource to a temp file, then pass it to `f`.
   * This function is used to make `f` can use the path of temp file(e.g. file:/), instead of
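
Because `PlanTestBase` now mixes in `SQLHelper` (see PlanTest.scala above), suites built on `SQLTestUtils` keep their existing `withTempPath` call sites. A hedged sketch with an invented suite name, assuming the usual `QueryTest` / `SharedSparkSession` test setup:

import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.test.SharedSparkSession

// Hypothetical suite, not part of this commit; withTempPath is now inherited
// from SQLHelper via PlanTestBase instead of being defined in SQLTestUtils.
class WithTempPathExampleSuite extends QueryTest with SharedSparkSession {
  test("roundtrip through a temp path") {
    withTempPath { dir =>
      spark.range(10).write.json(dir.getCanonicalPath)
      checkAnswer(spark.read.json(dir.getCanonicalPath), spark.range(10).toDF())
    }
  }
}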

sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala

Lines changed: 4 additions & 10 deletions
@@ -19,23 +19,23 @@ package org.apache.spark.sql.hive.orc
 
 import java.io.File
 
-import scala.util.{Random, Try}
+import scala.util.Random
 
 import org.apache.spark.SparkConf
 import org.apache.spark.benchmark.Benchmark
 import org.apache.spark.sql.{DataFrame, SparkSession}
-import org.apache.spark.sql.catalyst.plans.SupportWithSQLConf
+import org.apache.spark.sql.catalyst.plans.SQLHelper
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types._
-import org.apache.spark.util.Utils
+
 
 /**
  * Benchmark to measure ORC read performance.
  *
  * This is in `sql/hive` module in order to compare `sql/core` and `sql/hive` ORC data sources.
  */
 // scalastyle:off line.size.limit
-object OrcReadBenchmark extends SupportWithSQLConf {
+object OrcReadBenchmark extends SQLHelper {
   val conf = new SparkConf()
   conf.set("orc.compression", "snappy")
 
@@ -48,12 +48,6 @@ object OrcReadBenchmark extends SupportWithSQLConf {
   // Set default configs. Individual cases will change them if necessary.
   spark.conf.set(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key, "true")
 
-  def withTempPath(f: File => Unit): Unit = {
-    val path = Utils.createTempDir()
-    path.delete()
-    try f(path) finally Utils.deleteRecursively(path)
-  }
-
   def withTempTable(tableNames: String*)(f: => Unit): Unit = {
     try f finally tableNames.foreach(spark.catalog.dropTempView)
   }
