
Commit 88547a0

sryza authored and rxin committed
SPARK-3422. JavaAPISuite.getHadoopInputSplits isn't used anywhere.
Author: Sandy Ryza <[email protected]>

Closes #2324 from sryza/sandy-spark-3422 and squashes the following commits:

6446175 [Sandy Ryza] SPARK-3422. JavaAPISuite.getHadoopInputSplits isn't used anywhere.
1 parent 1e03cf7 commit 88547a0

File tree

1 file changed: 0 additions, 25 deletions


core/src/test/java/org/apache/spark/JavaAPISuite.java

Lines changed: 0 additions & 25 deletions
@@ -29,27 +29,21 @@
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
 import com.google.common.base.Optional;
 import com.google.common.base.Charsets;
 import com.google.common.io.Files;
 import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.compress.DefaultCodec;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
-import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.mapreduce.Job;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;

 import org.apache.spark.api.java.JavaDoubleRDD;
-import org.apache.spark.api.java.JavaHadoopRDD;
 import org.apache.spark.api.java.JavaPairRDD;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
@@ -1313,23 +1307,4 @@ public void collectUnderlyingScalaRDD() {
     SomeCustomClass[] collected = (SomeCustomClass[]) rdd.rdd().retag(SomeCustomClass.class).collect();
     Assert.assertEquals(data.size(), collected.length);
   }
-
-  public void getHadoopInputSplits() {
-    String outDir = new File(tempDir, "output").getAbsolutePath();
-    sc.parallelize(Arrays.asList(1, 2, 3, 4, 5), 2).saveAsTextFile(outDir);
-
-    JavaHadoopRDD<LongWritable, Text> hadoopRDD = (JavaHadoopRDD<LongWritable, Text>)
-      sc.hadoopFile(outDir, TextInputFormat.class, LongWritable.class, Text.class);
-    List<String> inputPaths = hadoopRDD.mapPartitionsWithInputSplit(
-      new Function2<InputSplit, Iterator<Tuple2<LongWritable, Text>>, Iterator<String>>() {
-        @Override
-        public Iterator<String> call(InputSplit split, Iterator<Tuple2<LongWritable, Text>> it)
-            throws Exception {
-          FileSplit fileSplit = (FileSplit) split;
-          return Lists.newArrayList(fileSplit.getPath().toUri().getPath()).iterator();
-        }
-      }, true).collect();
-    Assert.assertEquals(Sets.newHashSet(inputPaths),
-      Sets.newHashSet(outDir + "/part-00000", outDir + "/part-00001"));
-  }
 }
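As the diff shows, getHadoopInputSplits carried no @Test annotation, so JUnit never executed it; the method, and the six imports only it required, were dead code.

For reference, the deleted method exercised JavaHadoopRDD.mapPartitionsWithInputSplit, which hands each partition's Hadoop InputSplit to the mapping function alongside the partition's records. Below is a self-contained sketch of the same pattern, reassembled from the deleted code with its imports made explicit; the class name, the local-mode SparkContext, and the Guava temp-directory setup are illustrative stand-ins for the suite's JUnit fixtures, not part of the original.

import java.io.File;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.TextInputFormat;
import org.junit.Assert;
import scala.Tuple2;

import org.apache.spark.api.java.JavaHadoopRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;

public class GetHadoopInputSplitsSketch {
  public static void main(String[] args) {
    JavaSparkContext sc = new JavaSparkContext("local", "input-splits-sketch");
    String outDir = new File(Files.createTempDir(), "output").getAbsolutePath();

    // Two partitions, so saveAsTextFile writes part-00000 and part-00001.
    sc.parallelize(Arrays.asList(1, 2, 3, 4, 5), 2).saveAsTextFile(outDir);

    // hadoopFile is backed by a HadoopRDD, so the cast to JavaHadoopRDD holds.
    JavaHadoopRDD<LongWritable, Text> hadoopRDD = (JavaHadoopRDD<LongWritable, Text>)
        sc.hadoopFile(outDir, TextInputFormat.class, LongWritable.class, Text.class);

    // Map each partition together with the InputSplit it was read from,
    // collecting the file path behind every split.
    List<String> inputPaths = hadoopRDD.mapPartitionsWithInputSplit(
        new Function2<InputSplit, Iterator<Tuple2<LongWritable, Text>>, Iterator<String>>() {
          @Override
          public Iterator<String> call(InputSplit split, Iterator<Tuple2<LongWritable, Text>> it)
              throws Exception {
            FileSplit fileSplit = (FileSplit) split;
            return Lists.newArrayList(fileSplit.getPath().toUri().getPath()).iterator();
          }
        }, true).collect();

    Assert.assertEquals(
        Sets.newHashSet(outDir + "/part-00000", outDir + "/part-00001"),
        Sets.newHashSet(inputPaths));
    sc.stop();
  }
}

The trailing true is the preservesPartitioning flag, passed exactly as the removed test passed it.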

0 commit comments