Skip to content

Commit 068c35d

Browse files
committed
fix new style and add some tests
1 parent 925203b commit 068c35d

File tree

5 files changed

+138
-8
lines changed

5 files changed

+138
-8
lines changed

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -186,9 +186,9 @@ case class HashSortedPartitioning(expressions: Seq[Expression], numPartitions: I
186186
extends Expression
187187
with Partitioning {
188188

189-
override def children = expressions
190-
override def nullable = false
191-
override def dataType = IntegerType
189+
override def children: Seq[Expression] = expressions
190+
override def nullable: Boolean = false
191+
override def dataType: DataType = IntegerType
192192

193193
private[this] lazy val clusteringSet = expressions.toSet
194194

sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] {
9494
case ExtractEquiJoinKeys(Inner, leftKeys, rightKeys, condition, left, right)
9595
if sqlContext.conf.autoSortMergeJoin =>
9696
val mergeJoin =
97-
joins.SortMergeJoin(leftKeys, rightKeys, Inner, planLater(left), planLater(right))
97+
joins.SortMergeJoin(leftKeys, rightKeys, planLater(left), planLater(right))
9898
condition.map(Filter(_, mergeJoin)).getOrElse(mergeJoin) :: Nil
9999

100100
case ExtractEquiJoinKeys(Inner, leftKeys, rightKeys, condition, left, right) =>

sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoin.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
package org.apache.spark.sql.execution.joins
1919

2020
import org.apache.spark.annotation.DeveloperApi
21+
import org.apache.spark.rdd.RDD
2122
import org.apache.spark.sql.Row
2223
import org.apache.spark.sql.catalyst.expressions._
2324
import org.apache.spark.sql.catalyst.plans._
@@ -33,7 +34,6 @@ import org.apache.spark.util.collection.CompactBuffer
3334
case class SortMergeJoin(
3435
leftKeys: Seq[Expression],
3536
rightKeys: Seq[Expression],
36-
joinType: JoinType,
3737
left: SparkPlan,
3838
right: SparkPlan) extends BinaryNode {
3939

@@ -52,7 +52,7 @@ case class SortMergeJoin(
5252
@transient protected lazy val leftKeyGenerator = newProjection(leftKeys, left.output)
5353
@transient protected lazy val rightKeyGenerator = newProjection(rightKeys, right.output)
5454

55-
override def execute() = {
55+
override def execute(): RDD[Row] = {
5656
val leftResults = left.execute().map(_.copy())
5757
val rightResults = right.execute().map(_.copy())
5858

Lines changed: 132 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,132 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.hive.execution

import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.hive.test.TestHive

/**
 * Runs the join-related test cases that are included in the Hive distribution
 * with sort-merge join enabled (`spark.sql.autoSortMergeJoin = true`).
 */
class SortMergeCompatibilitySuite extends HiveCompatibilitySuite {
  // Turn sort-merge join on for every test in this suite.
  override def beforeAll(): Unit = {
    super.beforeAll()
    TestHive.setConf(SQLConf.AUTO_SORTMERGEJOIN, "true")
  }

  // Restore the default so suites that run afterwards are not affected.
  override def afterAll(): Unit = {
    TestHive.setConf(SQLConf.AUTO_SORTMERGEJOIN, "false")
    super.afterAll()
  }

  /** Restrict the parent suite's white list to the join test cases. */
  override def whiteList: Seq[String] = Seq(
    "auto_join0",
    "auto_join1",
    "auto_join10",
    "auto_join11",
    "auto_join12",
    "auto_join13",
    "auto_join14",
    "auto_join14_hadoop20",
    "auto_join15",
    "auto_join17",
    "auto_join18",
    "auto_join19",
    "auto_join2",
    "auto_join20",
    "auto_join21",
    "auto_join22",
    "auto_join23",
    "auto_join24",
    "auto_join25",
    "auto_join26",
    "auto_join27",
    "auto_join28",
    "auto_join3",
    "auto_join30",
    "auto_join31",
    "auto_join32",
    "auto_join4",
    "auto_join5",
    "auto_join6",
    "auto_join7",
    "auto_join8",
    "auto_join9",
    "auto_join_filters",
    "auto_join_nulls",
    "auto_join_reordering_values",
    "auto_smb_mapjoin_14",
    "auto_sortmerge_join_1",
    "auto_sortmerge_join_10",
    "auto_sortmerge_join_11",
    "auto_sortmerge_join_12",
    "auto_sortmerge_join_13",
    "auto_sortmerge_join_14",
    "auto_sortmerge_join_15",
    "auto_sortmerge_join_16",
    "auto_sortmerge_join_2",
    "auto_sortmerge_join_3",
    "auto_sortmerge_join_4",
    "auto_sortmerge_join_5",
    "auto_sortmerge_join_6",
    "auto_sortmerge_join_7",
    "auto_sortmerge_join_8",
    "auto_sortmerge_join_9",
    "join0",
    "join1",
    "join10",
    "join11",
    "join12",
    "join13",
    "join14",
    "join14_hadoop20",
    "join15",
    "join16",
    "join17",
    "join18",
    "join19",
    "join2",
    "join20",
    "join21",
    "join22",
    "join23",
    "join24",
    "join25",
    "join26",
    "join27",
    "join28",
    "join29",
    "join3",
    "join30",
    "join31",
    "join32",
    "join32_lessSize",
    "join33",
    "join34",
    "join35",
    "join36",
    "join37",
    "join38",
    "join39",
    "join4",
    "join40",
    "join41",
    "join5",
    "join6",
    "join7",
    "join8",
    "join9",
    "join_1to1",
    "join_array",
    "join_casesensitive",
    "join_empty",
    "join_filters",
    "join_hive_626",
    "join_map_ppr",
    "join_nulls",
    "join_nullsafe",
    "join_rc",
    "join_reorder2",
    "join_reorder3",
    "join_reorder4",
    "join_star"
  )
}

sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -144,7 +144,6 @@ class StatisticsSuite extends QueryTest with BeforeAndAfterAll {
144144
expectedAnswer: Seq[Row],
145145
ct: ClassTag[_]) = {
146146
before()
147-
conf.setConf("spark.sql.autoSortMergeJoin", "false")
148147

149148
var df = sql(query)
150149

@@ -179,7 +178,6 @@ class StatisticsSuite extends QueryTest with BeforeAndAfterAll {
179178
sql(s"""SET ${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD}=$tmp""")
180179
}
181180

182-
conf.setConf("spark.sql.autoSortMergeJoin", "true")
183181
after()
184182
}
185183

0 commit comments

Comments
 (0)