Skip to content

Commit f6c486a

Browse files
scwf authored and marmbrus committed
[SQL] [TEST] udf_java_method failed due to jdk version
java.lang.Math.exp(1.0) has different result between jdk versions. so do not use createQueryTest, write a separate test for it. ``` jdk version result 1.7.0_11 2.7182818284590455 1.7.0_05 2.7182818284590455 1.7.0_71 2.718281828459045 ``` Author: scwf <[email protected]> Closes #6274 from scwf/java_method and squashes the following commits: 3dd2516 [scwf] address comments 5fa1459 [scwf] style df46445 [scwf] fix test error fcb6d22 [scwf] fix udf_java_method
1 parent 4f57200 commit f6c486a

File tree

3 files changed

+29
-7
lines changed

3 files changed

+29
-7
lines changed

sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -250,7 +250,10 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
250250

251251
// The isolated classloader seemed to make some of our test reset mechanisms less robust.
252252
"combine1", // This test changes compression settings in a way that breaks all subsequent tests.
253-
"load_dyn_part14.*" // These work alone but fail when run with other tests...
253+
"load_dyn_part14.*", // These work alone but fail when run with other tests...
254+
255+
// the answer is sensitive for jdk version
256+
"udf_java_method"
254257
) ++ HiveShim.compatibilityBlackList
255258

256259
/**
@@ -877,7 +880,6 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter {
877880
"udf_int",
878881
"udf_isnotnull",
879882
"udf_isnull",
880-
"udf_java_method",
881883
"udf_lcase",
882884
"udf_length",
883885
"udf_lessthan",

sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,13 +20,10 @@ package org.apache.spark.sql.hive.execution
2020
import java.io.File
2121
import java.util.{Locale, TimeZone}
2222

23-
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF
24-
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
25-
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspectorFactory, StructObjectInspector, ObjectInspector}
26-
import org.scalatest.BeforeAndAfter
27-
2823
import scala.util.Try
2924

25+
import org.scalatest.BeforeAndAfter
26+
3027
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
3128

3229
import org.apache.spark.{SparkFiles, SparkException}

sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -814,4 +814,27 @@ class SQLQuerySuite extends QueryTest {
814814
sql("SELECT cast(key+2 as Int) from df_analysis A group by cast(key+1 as int)")
815815
}
816816
}
817+
818+
// `Math.exp(1.0)` has different result for different jdk version, so not use createQueryTest
819+
test("udf_java_method") {
820+
checkAnswer(sql(
821+
"""
822+
|SELECT java_method("java.lang.String", "valueOf", 1),
823+
| java_method("java.lang.String", "isEmpty"),
824+
| java_method("java.lang.Math", "max", 2, 3),
825+
| java_method("java.lang.Math", "min", 2, 3),
826+
| java_method("java.lang.Math", "round", 2.5),
827+
| java_method("java.lang.Math", "exp", 1.0),
828+
| java_method("java.lang.Math", "floor", 1.9)
829+
|FROM src tablesample (1 rows)
830+
""".stripMargin),
831+
Row(
832+
"1",
833+
"true",
834+
java.lang.Math.max(2, 3).toString,
835+
java.lang.Math.min(2, 3).toString,
836+
java.lang.Math.round(2.5).toString,
837+
java.lang.Math.exp(1.0).toString,
838+
java.lang.Math.floor(1.9).toString))
839+
}
817840
}

0 commit comments

Comments
 (0)