diff --git a/python/pyspark/ml/tests.py b/python/pyspark/ml/tests.py index 648fa8858fba3..0326cbeb7120d 100644 --- a/python/pyspark/ml/tests.py +++ b/python/pyspark/ml/tests.py @@ -20,6 +20,10 @@ """ import sys +try: + import xmlrunner +except ImportError: + xmlrunner = None if sys.version_info[:2] <= (2, 6): try: @@ -368,4 +372,7 @@ def test_fit_maximize_metric(self): if __name__ == "__main__": - unittest.main() + if xmlrunner: + unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports')) + else: + unittest.main() diff --git a/python/pyspark/mllib/tests.py b/python/pyspark/mllib/tests.py index 96cf13495aa95..63ddd60392654 100644 --- a/python/pyspark/mllib/tests.py +++ b/python/pyspark/mllib/tests.py @@ -31,6 +31,10 @@ from numpy import sum as array_sum from py4j.protocol import Py4JJavaError +try: + import xmlrunner +except ImportError: + xmlrunner = None if sys.version > '3': basestring = str @@ -1526,7 +1530,10 @@ def test_load_vectors(self): if __name__ == "__main__": if not _have_scipy: print("NOTE: Skipping SciPy tests as it does not seem to be installed") - unittest.main() + if xmlrunner: + unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports')) + else: + unittest.main() if not _have_scipy: print("NOTE: SciPy tests were skipped as it does not seem to be installed") sc.stop() diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py index 645133b2b2d84..fa6b0abb33094 100644 --- a/python/pyspark/sql/tests.py +++ b/python/pyspark/sql/tests.py @@ -31,6 +31,10 @@ import datetime import py4j +try: + import xmlrunner +except ImportError: + xmlrunner = None if sys.version_info[:2] <= (2, 6): try: @@ -1208,4 +1212,7 @@ def test_window_functions_without_partitionBy(self): if __name__ == "__main__": - unittest.main() + if xmlrunner: + unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports')) + else: + unittest.main() diff --git a/python/pyspark/streaming/tests.py b/python/pyspark/streaming/tests.py index e4e56fff3b3fc..ed183e2ae7757 100644 --- a/python/pyspark/streaming/tests.py +++ b/python/pyspark/streaming/tests.py @@ -27,6 +27,11 @@ import shutil from functools import reduce +try: + import xmlrunner +except ImportError: + xmlrunner = None + if sys.version_info[:2] <= (2, 6): try: import unittest2 as unittest @@ -1297,4 +1302,8 @@ def search_kinesis_asl_assembly_jar(): for testcase in testcases: sys.stderr.write("[Running %s]\n" % (testcase)) tests = unittest.TestLoader().loadTestsFromTestCase(testcase) - unittest.TextTestRunner(verbosity=3).run(tests) + if xmlrunner: + unittest.main(tests, verbosity=3, + testRunner=xmlrunner.XMLTestRunner(output='target/test-reports')) + else: + unittest.TextTestRunner(verbosity=3).run(tests) diff --git a/python/pyspark/tests.py b/python/pyspark/tests.py index f11aaf001c8df..e317025c03087 100644 --- a/python/pyspark/tests.py +++ b/python/pyspark/tests.py @@ -35,6 +35,10 @@ import hashlib from py4j.protocol import Py4JJavaError +try: + import xmlrunner +except ImportError: + xmlrunner = None if sys.version_info[:2] <= (2, 6): try: @@ -249,10 +253,12 @@ def __getattr__(self, item): # Regression test for SPARK-3415 def test_pickling_file_handles(self): - ser = CloudPickleSerializer() - out1 = sys.stderr - out2 = ser.loads(ser.dumps(out1)) - self.assertEqual(out1, out2) + # to be corrected with SPARK-11160 + if not xmlrunner: + ser = CloudPickleSerializer() + out1 = sys.stderr + out2 = ser.loads(ser.dumps(out1)) + self.assertEqual(out1, out2) def test_func_globals(self): @@ -1982,7 +1988,10 @@ def test_statcounter_array(self): print("NOTE: Skipping SciPy tests as it does not seem to be installed") if not _have_numpy: print("NOTE: Skipping NumPy tests as it does not seem to be installed") - unittest.main() + if xmlrunner: + unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports')) + else: + unittest.main() if not _have_scipy: print("NOTE: SciPy tests were skipped as it does not seem to be installed") if not _have_numpy: