@@ -72,9 +72,9 @@ class PruneFileSourcePartitionsSuite extends QueryTest with SQLTestUtils with Te
   test("SPARK-20986 Reset table's statistics after PruneFileSourcePartitions rule") {
     withTempView("tempTbl") {
       withTable("partTbl") {
-        spark.range(1000).selectExpr("id").createOrReplaceTempView("tempTbl")
+        spark.range(10).selectExpr("id").createOrReplaceTempView("tempTbl")
         sql("CREATE TABLE partTbl (id INT) PARTITIONED BY (part INT) STORED AS parquet")
-        for (part <- Seq(1, 2, 3)) {
+        for (part <- Seq(1, 2)) {
           sql(
             s"""
               |INSERT OVERWRITE TABLE partTbl PARTITION (part='$part')
@@ -83,8 +83,7 @@ class PruneFileSourcePartitionsSuite extends QueryTest with SQLTestUtils with Te
         }

         val tableName = "partTbl"
-        sql(s"analyze table partTbl compute STATISTICS")
-
+        sql(s"ANALYZE TABLE partTbl COMPUTE STATISTICS")
         val tableStats =
           spark.sessionState.catalog.getTableMetadata(TableIdentifier(tableName)).stats
         assert(tableStats.isDefined && tableStats.get.sizeInBytes > 0, "tableStats is lost")
@@ -98,7 +97,7 @@ class PruneFileSourcePartitionsSuite extends QueryTest with SQLTestUtils with Te
         assert(sizes1.size === 1, s"Size wrong for:\n ${df.queryExecution}")
         assert(sizes1(0) == tableStats.get.sizeInBytes)
         val sizes2 = Optimize.execute(query).collect {
-          case relation: LogicalRelation => relation.computeStats(conf).sizeInBytes
+          case relation: LogicalRelation => relation.catalogTable.get.stats.get.sizeInBytes
         }
         assert(sizes2.size === 1, s"Size wrong for:\n ${df.queryExecution}")
         assert(sizes2(0) < tableStats.get.sizeInBytes)
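
For reference, a minimal standalone sketch of the behaviour the updated assertion checks: after ANALYZE TABLE records table-level statistics, the PruneFileSourcePartitions rule should reset the statistics carried by the pruned relation's catalogTable, so reading them back gives a size smaller than the full table. This is only an illustration under assumptions (a Hive-enabled local session, the default conversion of the Hive parquet table to a file-source LogicalRelation, and the hypothetical object name PrunedStatsSketch); it is not part of the suite.

// Illustrative sketch, not part of PruneFileSourcePartitionsSuite.
// Assumes a Hive-enabled local SparkSession and that the Hive parquet table
// is converted to a file-source LogicalRelation (the default behaviour).
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.execution.datasources.LogicalRelation

object PrunedStatsSketch {  // hypothetical object name
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .enableHiveSupport()
      .getOrCreate()

    spark.range(10).selectExpr("id").createOrReplaceTempView("tempTbl")
    spark.sql("CREATE TABLE partTbl (id INT) PARTITIONED BY (part INT) STORED AS parquet")
    for (part <- Seq(1, 2)) {
      spark.sql(
        s"INSERT OVERWRITE TABLE partTbl PARTITION (part='$part') SELECT id FROM tempTbl")
    }
    spark.sql("ANALYZE TABLE partTbl COMPUTE STATISTICS")

    // Table-level size recorded in the catalog by ANALYZE TABLE.
    val tableSize = spark.sessionState.catalog
      .getTableMetadata(TableIdentifier("partTbl")).stats.get.sizeInBytes

    // Sizes carried by the relations in the optimized plan of a query that
    // touches a single partition; after SPARK-20986 these should be reset to
    // the pruned size, i.e. smaller than tableSize.
    val prunedSizes = spark.sql("SELECT * FROM partTbl WHERE part = 1")
      .queryExecution.optimizedPlan.collect {
        case rel: LogicalRelation => rel.catalogTable.flatMap(_.stats).map(_.sizeInBytes)
      }

    println(s"full table size = $tableSize, pruned relation size(s) = $prunedSizes")
    spark.stop()
  }
}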