Skip to content

Commit 03d8015

Browse files
ankurdave
authored and yhuai committed
[SPARK-12298][SQL] Fix infinite loop in DataFrame.sortWithinPartitions
Modifies the String overload to call the Column overload, and ensures this is covered by a test.

Author: Ankur Dave <[email protected]>

Closes #10271 from ankurdave/SPARK-12298.

(cherry picked from commit 1e799d6)
Signed-off-by: Yin Huai <[email protected]>
1 parent c2f2046 commit 03d8015

File tree

2 files changed

+3
-3
lines changed

2 files changed

+3
-3
lines changed

sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -609,7 +609,7 @@ class DataFrame private[sql](
609609
*/
610610
@scala.annotation.varargs
611611
def sortWithinPartitions(sortCol: String, sortCols: String*): DataFrame = {
612-
sortWithinPartitions(sortCol, sortCols : _*)
612+
sortWithinPartitions((sortCol +: sortCols).map(Column(_)) : _*)
613613
}
614614

615615
/**

sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1083,8 +1083,8 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
10831083
}
10841084

10851085
// Distribute into one partition and order by. This partition should contain all the values.
1086-
val df6 = data.repartition(1, $"a").sortWithinPartitions($"b".asc)
1087-
// Walk each partition and verify that it is sorted descending and not globally sorted.
1086+
val df6 = data.repartition(1, $"a").sortWithinPartitions("b")
1087+
// Walk each partition and verify that it is sorted ascending and not globally sorted.
10881088
df6.rdd.foreachPartition { p =>
10891089
var previousValue: Int = -1
10901090
var allSequential: Boolean = true

0 commit comments

Comments
 (0)