
Commit 04e2273

Cleanup 'import SparkContext._' in core
1 parent 5e7a6dc · commit 04e2273

36 files changed (+8, -44 lines)

core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala

Lines changed: 0 additions & 1 deletion

@@ -28,7 +28,6 @@ import com.google.common.base.Optional
 import org.apache.hadoop.io.compress.CompressionCodec
 
 import org.apache.spark._
-import org.apache.spark.SparkContext._
 import org.apache.spark.annotation.Experimental
 import org.apache.spark.api.java.JavaPairRDD._
 import org.apache.spark.api.java.JavaSparkContext.fakeClassTag

core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala

Lines changed: 0 additions & 1 deletion

@@ -34,7 +34,6 @@ import org.apache.hadoop.io.compress.CompressionCodec
 import org.apache.hadoop.mapred.{InputFormat, OutputFormat, JobConf}
 import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, OutputFormat => NewOutputFormat}
 import org.apache.spark._
-import org.apache.spark.SparkContext._
 import org.apache.spark.api.java.{JavaSparkContext, JavaPairRDD, JavaRDD}
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.rdd.RDD

core/src/main/scala/org/apache/spark/package.scala

Lines changed: 2 additions & 2 deletions

@@ -27,8 +27,8 @@ package org.apache
  * contains operations available only on RDDs of Doubles; and
  * [[org.apache.spark.rdd.SequenceFileRDDFunctions]] contains operations available on RDDs that can
  * be saved as SequenceFiles. These operations are automatically available on any RDD of the right
- * type (e.g. RDD[(Int, Int)] through implicit conversions when you
- * `import org.apache.spark.SparkContext._`.
+ * type (e.g. RDD[(Int, Int)]) through implicit conversions, except `saveAsSequenceFile`. You need
+ * to `import org.apache.spark.SparkContext._` to make `saveAsSequenceFile` work.
  *
  * Java programmers should reference the [[org.apache.spark.api.java]] package
  * for Spark programming APIs in Java.
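To make the revised wording concrete, here is a minimal sketch of the split it describes, assuming an existing SparkContext named `sc`; the output path is illustrative:

    val pairs = sc.parallelize(Seq((1, 1), (2, 2)))   // RDD[(Int, Int)]

    // Pair operations resolve through implicit conversions, no extra import needed.
    pairs.reduceByKey(_ + _)

    // saveAsSequenceFile is the exception: it still requires the import below.
    import org.apache.spark.SparkContext._
    pairs.saveAsSequenceFile("/tmp/pairs")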

core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala

Lines changed: 0 additions & 1 deletion

@@ -27,7 +27,6 @@ import org.apache.spark.{ComplexFutureAction, FutureAction, Logging}
 
 /**
  * A set of asynchronous RDD actions available through an implicit conversion.
- * Import `org.apache.spark.SparkContext._` at the top of your program to use these functions.
  */
 class AsyncRDDActions[T: ClassTag](self: RDD[T]) extends Serializable with Logging {
 
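As a hedged illustration of the doc comment above: after this change the conversion to AsyncRDDActions resolves without the removed import (`sc` is an assumed SparkContext):

    import scala.concurrent.Await
    import scala.concurrent.duration._

    val rdd = sc.parallelize(1 to 1000)
    val f = rdd.countAsync()            // FutureAction[Long] via the implicit conversion
    val n = Await.result(f, 1.minute)   // blocking here only to keep the sketch short
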
core/src/main/scala/org/apache/spark/rdd/DoubleRDDFunctions.scala

Lines changed: 0 additions & 1 deletion

@@ -27,7 +27,6 @@ import org.apache.spark.util.StatCounter
 
 /**
  * Extra functions available on RDDs of Doubles through an implicit conversion.
- * Import `org.apache.spark.SparkContext._` at the top of your program to use these functions.
  */
 class DoubleRDDFunctions(self: RDD[Double]) extends Logging with Serializable {
   /** Add up the elements in this RDD. */
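For context, a small sketch of the Double-specific methods the comment refers to, assuming a SparkContext named `sc`:

    val xs = sc.parallelize(Seq(1.0, 2.0, 3.0))

    // Resolved through the implicit conversion to DoubleRDDFunctions.
    xs.sum()     // 6.0
    xs.mean()    // 2.0
    xs.stats()   // StatCounter with count, mean, stdev, max, min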

core/src/main/scala/org/apache/spark/rdd/OrderedRDDFunctions.scala

Lines changed: 3 additions & 4 deletions

@@ -24,10 +24,9 @@ import org.apache.spark.annotation.DeveloperApi
 
 /**
  * Extra functions available on RDDs of (key, value) pairs where the key is sortable through
- * an implicit conversion. Import `org.apache.spark.SparkContext._` at the top of your program to
- * use these functions. They will work with any key type `K` that has an implicit `Ordering[K]` in
- * scope. Ordering objects already exist for all of the standard primitive types. Users can also
- * define their own orderings for custom types, or to override the default ordering. The implicit
+ * an implicit conversion. They will work with any key type `K` that has an implicit `Ordering[K]`
+ * in scope. Ordering objects already exist for all of the standard primitive types. Users can also
+ * define their own orderings for custom types, or to override the default ordering. The implicit
  * ordering that is in the closest scope will be used.
 *
 * {{{
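A hedged sketch of the ordering behavior described above (assuming `sc`; the length-based ordering is purely illustrative):

    val rdd = sc.parallelize(Seq(("banana", 1), ("apple", 2)))

    // String already has an implicit Ordering in scope, so this works as-is.
    rdd.sortByKey()

    // A user-defined ordering in closer scope overrides the default.
    implicit val byLength: Ordering[String] = Ordering.by(_.length)
    rdd.sortByKey()   // now sorts keys by length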

core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala

Lines changed: 0 additions & 2 deletions

@@ -37,7 +37,6 @@ RecordWriter => NewRecordWriter}
 
 import org.apache.spark._
 import org.apache.spark.Partitioner.defaultPartitioner
-import org.apache.spark.SparkContext._
 import org.apache.spark.annotation.Experimental
 import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.executor.{DataWriteMethod, OutputMetrics}

@@ -50,7 +49,6 @@ import org.apache.spark.util.random.StratifiedSamplingUtils
 
 /**
  * Extra functions available on RDDs of (key, value) pairs through an implicit conversion.
- * Import `org.apache.spark.SparkContext._` at the top of your program to use these functions.
  */
 class PairRDDFunctions[K, V](self: RDD[(K, V)])
     (implicit kt: ClassTag[K], vt: ClassTag[V], ord: Ordering[K] = null)
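To ground the comment above, a minimal example of pair-specific methods resolving via the implicit conversion alone (assuming a SparkContext named `sc`):

    val pairs = sc.parallelize(Seq(("a", 1), ("b", 2), ("a", 3)))

    pairs.reduceByKey(_ + _).collect()   // e.g. Array((a,4), (b,2)); order not guaranteed
    pairs.mapValues(_ * 10)
    pairs.groupByKey()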

core/src/main/scala/org/apache/spark/rdd/RDD.scala

Lines changed: 3 additions & 3 deletions

@@ -34,7 +34,6 @@ import org.apache.hadoop.mapred.TextOutputFormat
 
 import org.apache.spark._
 import org.apache.spark.Partitioner._
-import org.apache.spark.SparkContext._
 import org.apache.spark.annotation.{DeveloperApi, Experimental}
 import org.apache.spark.api.java.JavaRDD
 import org.apache.spark.broadcast.Broadcast

@@ -58,8 +57,9 @@ import org.apache.spark.util.random.{BernoulliSampler, PoissonSampler, Bernoulli
  * Doubles; and
  * [[org.apache.spark.rdd.SequenceFileRDDFunctions]] contains operations available on RDDs that
  * can be saved as SequenceFiles.
- * These operations are automatically available on any RDD of the right type (e.g. RDD[(Int, Int)]
- * through implicit conversions when you `import org.apache.spark.SparkContext._`.
+ * All operations are automatically available on any RDD of the right type (e.g. RDD[(Int, Int)])
+ * through implicit conversions, except `saveAsSequenceFile`. You need to
+ * `import org.apache.spark.SparkContext._` to make `saveAsSequenceFile` work.
 *
 * Internally, each RDD is characterized by five main properties:
 *
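A hedged end-to-end illustration of the revised scaladoc: type-specific methods appear automatically once an RDD has the right element type, while `saveAsSequenceFile` alone still needs the import (`sc` and the commented path are assumed for illustration):

    val words = sc.parallelize(Seq("a", "b", "a"))
    val counts = words.map(w => (w, 1)).reduceByKey(_ + _)   // via PairRDDFunctions
    counts.map(_._2.toDouble).mean()                         // via DoubleRDDFunctions

    // Without `import org.apache.spark.SparkContext._` this line would not compile:
    // counts.saveAsSequenceFile("/tmp/counts")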

core/src/main/scala/org/apache/spark/rdd/SequenceFileRDDFunctions.scala

Lines changed: 0 additions & 1 deletion

@@ -24,7 +24,6 @@ import org.apache.hadoop.mapred.JobConf
 import org.apache.hadoop.mapred.SequenceFileOutputFormat
 
 import org.apache.spark.Logging
-import org.apache.spark.SparkContext._
 
 /**
  * Extra functions available on RDDs of (key, value) pairs to create a Hadoop SequenceFile,
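For completeness, a small round-trip sketch of the functionality this file provides; the import is the one this commit keeps, and `sc` plus the path are assumed for illustration:

    import org.apache.spark.SparkContext._   // brings saveAsSequenceFile into scope

    val pairs = sc.parallelize(Seq((1, "one"), (2, "two")))
    pairs.saveAsSequenceFile("/tmp/seqfile-demo")

    // Keys and values must be convertible to/from Hadoop Writables.
    val restored = sc.sequenceFile[Int, String]("/tmp/seqfile-demo")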

core/src/main/scala/org/apache/spark/ui/UIWorkloadGenerator.scala

Lines changed: 0 additions & 1 deletion

@@ -20,7 +20,6 @@ package org.apache.spark.ui
 import scala.util.Random
 
 import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
 import org.apache.spark.scheduler.SchedulingMode
 
 /**
