@@ -180,7 +180,7 @@ setMethod("getJRDD", signature(rdd = "PipelinedRDD"),
             }
             # Save the serialization flag after we create a RRDD
             rdd@env$serializedMode <- serializedMode
-            rdd@env$jrdd_val <- callJMethod(rddRef, "asJavaRDD") # rddRef$asJavaRDD()
+            rdd@env$jrdd_val <- callJMethod(rddRef, "asJavaRDD")
             rdd@env$jrdd_val
           })
 
@@ -225,7 +225,7 @@ setMethod("cache",
 #'
 #' Persist this RDD with the specified storage level. For details of the
 #' supported storage levels, refer to
-#' http://spark.apache.org/docs/latest/programming-guide.html#rdd-persistence.
+#' \url{http://spark.apache.org/docs/latest/programming-guide.html#rdd-persistence}.
 #'
 #' @param x The RDD to persist
 #' @param newLevel The new storage level to be assigned
@@ -382,11 +382,13 @@ setMethod("collectPartition",
 #' \code{collectAsMap} returns a named list as a map that contains all of the elements
 #' in a key-value pair RDD.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(list(1, 2), list(3, 4)), 2L)
 #' collectAsMap(rdd) # list(`1` = 2, `3` = 4)
 #'}
+# nolint end
 #' @rdname collect-methods
 #' @aliases collectAsMap,RDD-method
 #' @noRd
@@ -442,11 +444,13 @@ setMethod("length",
 #' @return list of (value, count) pairs, where count is number of each unique
 #' value in rdd.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, c(1,2,3,2,1))
 #' countByValue(rdd) # (1,2L), (2,2L), (3,1L)
 #'}
+# nolint end
 #' @rdname countByValue
 #' @aliases countByValue,RDD-method
 #' @noRd
@@ -597,11 +601,13 @@ setMethod("mapPartitionsWithIndex",
 #' @param x The RDD to be filtered.
 #' @param f A unary predicate function.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, 1:10)
 #' unlist(collect(filterRDD(rdd, function (x) { x < 3 }))) # c(1, 2)
 #'}
+# nolint end
 #' @rdname filterRDD
 #' @aliases filterRDD,RDD,function-method
 #' @noRd
@@ -756,11 +762,13 @@ setMethod("foreachPartition",
 #' @param x The RDD to take elements from
 #' @param num Number of elements to take
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, 1:10)
 #' take(rdd, 2L) # list(1, 2)
 #'}
+# nolint end
 #' @rdname take
 #' @aliases take,RDD,numeric-method
 #' @noRd
@@ -824,11 +832,13 @@ setMethod("first",
 #' @param x The RDD to remove duplicates from.
 #' @param numPartitions Number of partitions to create.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, c(1,2,2,3,3,3))
 #' sort(unlist(collect(distinct(rdd)))) # c(1, 2, 3)
 #'}
+# nolint end
 #' @rdname distinct
 #' @aliases distinct,RDD-method
 #' @noRd
@@ -974,11 +984,13 @@ setMethod("takeSample", signature(x = "RDD", withReplacement = "logical",
 #' @param x The RDD.
 #' @param func The function to be applied.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(1, 2, 3))
 #' collect(keyBy(rdd, function(x) { x*x })) # list(list(1, 1), list(4, 2), list(9, 3))
 #'}
+# nolint end
 #' @rdname keyBy
 #' @aliases keyBy,RDD
 #' @noRd
@@ -1113,11 +1125,13 @@ setMethod("saveAsTextFile",
 #' @param numPartitions Number of partitions to create.
 #' @return An RDD where all elements are sorted.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(3, 2, 1))
 #' collect(sortBy(rdd, function(x) { x })) # list (1, 2, 3)
 #'}
+# nolint end
 #' @rdname sortBy
 #' @aliases sortBy,RDD,RDD-method
 #' @noRd
@@ -1188,11 +1202,13 @@ takeOrderedElem <- function(x, num, ascending = TRUE) {
 #' @param num Number of elements to return.
 #' @return The first N elements from the RDD in ascending order.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(10, 1, 2, 9, 3, 4, 5, 6, 7))
 #' takeOrdered(rdd, 6L) # list(1, 2, 3, 4, 5, 6)
 #'}
+# nolint end
 #' @rdname takeOrdered
 #' @aliases takeOrdered,RDD,RDD-method
 #' @noRd
@@ -1209,11 +1225,13 @@ setMethod("takeOrdered",
 #' @return The top N elements from the RDD.
 #' @rdname top
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(10, 1, 2, 9, 3, 4, 5, 6, 7))
 #' top(rdd, 6L) # list(10, 9, 7, 6, 5, 4)
 #'}
+# nolint end
 #' @aliases top,RDD,RDD-method
 #' @noRd
 setMethod("top",
@@ -1261,6 +1279,7 @@ setMethod("fold",
 #' @rdname aggregateRDD
 #' @seealso reduce
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list(1, 2, 3, 4))
@@ -1269,6 +1288,7 @@ setMethod("fold",
 #' combOp <- function(x, y) { list(x[[1]] + y[[1]], x[[2]] + y[[2]]) }
 #' aggregateRDD(rdd, zeroValue, seqOp, combOp) # list(10, 4)
 #'}
+# nolint end
 #' @aliases aggregateRDD,RDD,RDD-method
 #' @noRd
 setMethod("aggregateRDD",
@@ -1367,12 +1387,14 @@ setMethod("setName",
 #' @return An RDD with zipped items.
 #' @seealso zipWithIndex
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list("a", "b", "c", "d", "e"), 3L)
 #' collect(zipWithUniqueId(rdd))
 #' # list(list("a", 0), list("b", 3), list("c", 1), list("d", 4), list("e", 2))
 #'}
+# nolint end
 #' @rdname zipWithUniqueId
 #' @aliases zipWithUniqueId,RDD
 #' @noRd
@@ -1408,12 +1430,14 @@ setMethod("zipWithUniqueId",
 #' @return An RDD with zipped items.
 #' @seealso zipWithUniqueId
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, list("a", "b", "c", "d", "e"), 3L)
 #' collect(zipWithIndex(rdd))
 #' # list(list("a", 0), list("b", 1), list("c", 2), list("d", 3), list("e", 4))
 #'}
+# nolint end
 #' @rdname zipWithIndex
 #' @aliases zipWithIndex,RDD
 #' @noRd
@@ -1454,12 +1478,14 @@ setMethod("zipWithIndex",
 #' @return An RDD created by coalescing all elements within
 #' each partition into a list.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, as.list(1:4), 2L)
 #' collect(glom(rdd))
 #' # list(list(1, 2), list(3, 4))
 #'}
+# nolint end
 #' @rdname glom
 #' @aliases glom,RDD
 #' @noRd
@@ -1519,13 +1545,15 @@ setMethod("unionRDD",
 #' @param other Another RDD to be zipped.
 #' @return An RDD zipped from the two RDDs.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, 0:4)
 #' rdd2 <- parallelize(sc, 1000:1004)
 #' collect(zipRDD(rdd1, rdd2))
 #' # list(list(0, 1000), list(1, 1001), list(2, 1002), list(3, 1003), list(4, 1004))
 #'}
+# nolint end
 #' @rdname zipRDD
 #' @aliases zipRDD,RDD
 #' @noRd
@@ -1557,12 +1585,14 @@ setMethod("zipRDD",
 #' @param other An RDD.
 #' @return A new RDD which is the Cartesian product of these two RDDs.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd <- parallelize(sc, 1:2)
 #' sortByKey(cartesian(rdd, rdd))
 #' # list(list(1, 1), list(1, 2), list(2, 1), list(2, 2))
 #'}
+# nolint end
 #' @rdname cartesian
 #' @aliases cartesian,RDD,RDD-method
 #' @noRd
@@ -1587,13 +1617,15 @@ setMethod("cartesian",
 #' @param numPartitions Number of the partitions in the result RDD.
 #' @return An RDD with the elements from this that are not in other.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, list(1, 1, 2, 2, 3, 4))
 #' rdd2 <- parallelize(sc, list(2, 4))
 #' collect(subtract(rdd1, rdd2))
 #' # list(1, 1, 3)
 #'}
+# nolint end
 #' @rdname subtract
 #' @aliases subtract,RDD
 #' @noRd
@@ -1619,13 +1651,15 @@ setMethod("subtract",
 #' @param numPartitions The number of partitions in the result RDD.
 #' @return An RDD which is the intersection of these two RDDs.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, list(1, 10, 2, 3, 4, 5))
 #' rdd2 <- parallelize(sc, list(1, 6, 2, 3, 7, 8))
 #' collect(sortBy(intersection(rdd1, rdd2), function(x) { x }))
 #' # list(1, 2, 3)
 #'}
+# nolint end
 #' @rdname intersection
 #' @aliases intersection,RDD
 #' @noRd
@@ -1653,6 +1687,7 @@ setMethod("intersection",
 #' Assumes that all the RDDs have the *same number of partitions*, but
 #' does *not* require them to have the same number of elements in each partition.
 #' @examples
+# nolint start
 #'\dontrun{
 #' sc <- sparkR.init()
 #' rdd1 <- parallelize(sc, 1:2, 2L) # 1, 2
@@ -1662,6 +1697,7 @@ setMethod("intersection",
 #' func = function(x, y, z) { list(list(x, y, z))} ))
 #' # list(list(1, c(1,2), c(1,2,3)), list(2, c(3,4), c(4,5,6)))
 #'}
+# nolint end
 #' @rdname zipRDD
 #' @aliases zipPartitions,RDD
 #' @noRd
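Note on the markers added throughout this diff: # nolint start and # nolint end are lintr's region-suppression directives, so every line between a pair is excluded from lintr checks; wrapping the long roxygen example blocks this way keeps them from tripping lints such as the line-length check. A minimal sketch of the pattern on a hypothetical documentation block (the count() example below is illustrative only and is not part of this change):

# nolint start
#'\dontrun{
#' sc <- sparkR.init()
#' rdd <- parallelize(sc, 1:100)
#' count(rdd) # 100 (illustrative example, not from RDD.R)
#'}
# nolint end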