@@ -210,6 +210,28 @@ index 0efe0877e9b..423d3b3d76d 100644
 --
 -- SELECT_HAVING
 -- https://github.com/postgres/postgres/blob/REL_12_BETA2/src/test/regress/sql/select_having.sql
+diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
+index cf40e944c09..fc940f9452f 100644
+--- a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
++++ b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
+@@ -35,6 +35,7 @@ import org.apache.spark.sql.catalyst.analysis.TempTableAlreadyExistsException
+ import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
+ import org.apache.spark.sql.catalyst.plans.logical.{BROADCAST, Join, JoinStrategyHint, SHUFFLE_HASH}
+ import org.apache.spark.sql.catalyst.util.DateTimeConstants
++import org.apache.spark.sql.comet.execution.shuffle.CometShuffleExchangeExec
+ import org.apache.spark.sql.execution.{ColumnarToRowExec, ExecSubqueryExpression, RDDScanExec, SparkPlan}
+ import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
+ import org.apache.spark.sql.execution.columnar._
+@@ -516,7 +517,8 @@ class CachedTableSuite extends QueryTest with SQLTestUtils
+    */
+   private def verifyNumExchanges(df: DataFrame, expected: Int): Unit = {
+     assert(
+-      collect(df.queryExecution.executedPlan) { case e: ShuffleExchangeExec => e }.size == expected)
++      collect(df.queryExecution.executedPlan) {
++        case _: ShuffleExchangeExec | _: CometShuffleExchangeExec => 1 }.size == expected)
+   }
+
+   test("A cached table preserves the partitioning and ordering of its cached SparkPlan") {
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
 index ea5e47ede55..cab59443c79 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
@@ -829,6 +851,50 @@ index bd9c79e5b96..ab7584e768e 100644
       }
     assert(fileSourceScanSchemata.size === expectedSchemaCatalogStrings.size,
       s"Found ${fileSourceScanSchemata.size} file sources in dataframe, " +
+diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/V1WriteCommandSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/V1WriteCommandSuite.scala
+index ce43edb79c1..89e05c75380 100644
+--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/V1WriteCommandSuite.scala
++++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/V1WriteCommandSuite.scala
+@@ -17,9 +17,10 @@
+
+ package org.apache.spark.sql.execution.datasources
+
+-import org.apache.spark.sql.{QueryTest, Row}
++import org.apache.spark.sql.{IgnoreComet, QueryTest, Row}
+ import org.apache.spark.sql.catalyst.expressions.{Ascending, AttributeReference, NullsFirst, SortOrder}
+ import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Sort}
++import org.apache.spark.sql.comet.CometSortExec
+ import org.apache.spark.sql.execution.{QueryExecution, SortExec}
+ import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec
+ import org.apache.spark.sql.internal.SQLConf
+@@ -224,7 +225,7 @@ class V1WriteCommandSuite extends QueryTest with SharedSparkSession with V1Write
+
+       // assert the outer most sort in the executed plan
+       assert(plan.collectFirst {
+-        case s: SortExec => s
++        case s: CometSortExec => s.originalPlan
+       }.exists {
+         case SortExec(Seq(
+           SortOrder(AttributeReference("key", IntegerType, _, _), Ascending, NullsFirst, _),
+@@ -271,7 +272,7 @@ class V1WriteCommandSuite extends QueryTest with SharedSparkSession with V1Write
+
+       // assert the outer most sort in the executed plan
+       assert(plan.collectFirst {
+-        case s: SortExec => s
++        case s: CometSortExec => s.originalPlan
+       }.exists {
+         case SortExec(Seq(
+           SortOrder(AttributeReference("value", StringType, _, _), Ascending, NullsFirst, _),
+@@ -305,7 +306,8 @@ class V1WriteCommandSuite extends QueryTest with SharedSparkSession with V1Write
+     }
+   }
+
+-  test("v1 write with AQE changing SMJ to BHJ") {
++  test("v1 write with AQE changing SMJ to BHJ",
++    IgnoreComet("TODO: Comet SMJ to BHJ by AQE")) {
+     withPlannedWrite { enabled =>
+       withTable("t") {
+         sql(
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/binaryfile/BinaryFileFormatSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/binaryfile/BinaryFileFormatSuite.scala
 index 1d2e467c94c..3ea82cd1a3f 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/binaryfile/BinaryFileFormatSuite.scala