@@ -53,18 +53,20 @@ test_that("spark.bisectingKmeans", {
5353 c(0 , 1 , 2 , 3 ))
5454
5555 # Test model save/load
56- modelPath <- tempfile(pattern = " spark-bisectingkmeans" , fileext = " .tmp" )
57- write.ml(model , modelPath )
58- expect_error(write.ml(model , modelPath ))
59- write.ml(model , modelPath , overwrite = TRUE )
60- model2 <- read.ml(modelPath )
61- summary2 <- summary(model2 )
62- expect_equal(sort(unlist(summary.model $ size )), sort(unlist(summary2 $ size )))
63- expect_equal(summary.model $ coefficients , summary2 $ coefficients )
64- expect_true(! summary.model $ is.loaded )
65- expect_true(summary2 $ is.loaded )
66-
67- unlink(modelPath )
56+ if (not_cran_or_windows_with_hadoop()) {
57+ modelPath <- tempfile(pattern = " spark-bisectingkmeans" , fileext = " .tmp" )
58+ write.ml(model , modelPath )
59+ expect_error(write.ml(model , modelPath ))
60+ write.ml(model , modelPath , overwrite = TRUE )
61+ model2 <- read.ml(modelPath )
62+ summary2 <- summary(model2 )
63+ expect_equal(sort(unlist(summary.model $ size )), sort(unlist(summary2 $ size )))
64+ expect_equal(summary.model $ coefficients , summary2 $ coefficients )
65+ expect_true(! summary.model $ is.loaded )
66+ expect_true(summary2 $ is.loaded )
67+
68+ unlink(modelPath )
69+ }
6870})
6971
7072test_that(" spark.gaussianMixture" , {
@@ -125,18 +127,20 @@ test_that("spark.gaussianMixture", {
125127 expect_equal(p $ prediction , c(0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ))
126128
127129 # Test model save/load
128- modelPath <- tempfile(pattern = " spark-gaussianMixture" , fileext = " .tmp" )
129- write.ml(model , modelPath )
130- expect_error(write.ml(model , modelPath ))
131- write.ml(model , modelPath , overwrite = TRUE )
132- model2 <- read.ml(modelPath )
133- stats2 <- summary(model2 )
134- expect_equal(stats $ lambda , stats2 $ lambda )
135- expect_equal(unlist(stats $ mu ), unlist(stats2 $ mu ))
136- expect_equal(unlist(stats $ sigma ), unlist(stats2 $ sigma ))
137- expect_equal(unlist(stats $ loglik ), unlist(stats2 $ loglik ))
138-
139- unlink(modelPath )
130+ if (not_cran_or_windows_with_hadoop()) {
131+ modelPath <- tempfile(pattern = " spark-gaussianMixture" , fileext = " .tmp" )
132+ write.ml(model , modelPath )
133+ expect_error(write.ml(model , modelPath ))
134+ write.ml(model , modelPath , overwrite = TRUE )
135+ model2 <- read.ml(modelPath )
136+ stats2 <- summary(model2 )
137+ expect_equal(stats $ lambda , stats2 $ lambda )
138+ expect_equal(unlist(stats $ mu ), unlist(stats2 $ mu ))
139+ expect_equal(unlist(stats $ sigma ), unlist(stats2 $ sigma ))
140+ expect_equal(unlist(stats $ loglik ), unlist(stats2 $ loglik ))
141+
142+ unlink(modelPath )
143+ }
140144})
141145
142146test_that(" spark.kmeans" , {
@@ -171,18 +175,20 @@ test_that("spark.kmeans", {
171175 expect_true(class(summary.model $ coefficients [1 , ]) == " numeric" )
172176
173177 # Test model save/load
174- modelPath <- tempfile(pattern = " spark-kmeans" , fileext = " .tmp" )
175- write.ml(model , modelPath )
176- expect_error(write.ml(model , modelPath ))
177- write.ml(model , modelPath , overwrite = TRUE )
178- model2 <- read.ml(modelPath )
179- summary2 <- summary(model2 )
180- expect_equal(sort(unlist(summary.model $ size )), sort(unlist(summary2 $ size )))
181- expect_equal(summary.model $ coefficients , summary2 $ coefficients )
182- expect_true(! summary.model $ is.loaded )
183- expect_true(summary2 $ is.loaded )
184-
185- unlink(modelPath )
178+ if (not_cran_or_windows_with_hadoop()) {
179+ modelPath <- tempfile(pattern = " spark-kmeans" , fileext = " .tmp" )
180+ write.ml(model , modelPath )
181+ expect_error(write.ml(model , modelPath ))
182+ write.ml(model , modelPath , overwrite = TRUE )
183+ model2 <- read.ml(modelPath )
184+ summary2 <- summary(model2 )
185+ expect_equal(sort(unlist(summary.model $ size )), sort(unlist(summary2 $ size )))
186+ expect_equal(summary.model $ coefficients , summary2 $ coefficients )
187+ expect_true(! summary.model $ is.loaded )
188+ expect_true(summary2 $ is.loaded )
189+
190+ unlink(modelPath )
191+ }
186192
187193 # Test Kmeans on dataset that is sensitive to seed value
188194 col1 <- c(1 , 2 , 3 , 4 , 0 , 1 , 2 , 3 , 4 , 0 )
@@ -236,22 +242,24 @@ test_that("spark.lda with libsvm", {
236242 expect_true(logPrior <= 0 & ! is.na(logPrior ))
237243
238244 # Test model save/load
239- modelPath <- tempfile(pattern = " spark-lda" , fileext = " .tmp" )
240- write.ml(model , modelPath )
241- expect_error(write.ml(model , modelPath ))
242- write.ml(model , modelPath , overwrite = TRUE )
243- model2 <- read.ml(modelPath )
244- stats2 <- summary(model2 )
245-
246- expect_true(stats2 $ isDistributed )
247- expect_equal(logLikelihood , stats2 $ logLikelihood )
248- expect_equal(logPerplexity , stats2 $ logPerplexity )
249- expect_equal(vocabSize , stats2 $ vocabSize )
250- expect_equal(vocabulary , stats2 $ vocabulary )
251- expect_equal(trainingLogLikelihood , stats2 $ trainingLogLikelihood )
252- expect_equal(logPrior , stats2 $ logPrior )
253-
254- unlink(modelPath )
245+ if (not_cran_or_windows_with_hadoop()) {
246+ modelPath <- tempfile(pattern = " spark-lda" , fileext = " .tmp" )
247+ write.ml(model , modelPath )
248+ expect_error(write.ml(model , modelPath ))
249+ write.ml(model , modelPath , overwrite = TRUE )
250+ model2 <- read.ml(modelPath )
251+ stats2 <- summary(model2 )
252+
253+ expect_true(stats2 $ isDistributed )
254+ expect_equal(logLikelihood , stats2 $ logLikelihood )
255+ expect_equal(logPerplexity , stats2 $ logPerplexity )
256+ expect_equal(vocabSize , stats2 $ vocabSize )
257+ expect_equal(vocabulary , stats2 $ vocabulary )
258+ expect_equal(trainingLogLikelihood , stats2 $ trainingLogLikelihood )
259+ expect_equal(logPrior , stats2 $ logPrior )
260+
261+ unlink(modelPath )
262+ }
255263})
256264
257265test_that(" spark.lda with text input" , {
0 commit comments