Skip to content

Commit 99ce042

Browse files
committed
added saveAsTextFiles and saveAsPickledFiles
1 parent 2a06cdb commit 99ce042

File tree

2 files changed

+4
-1
lines changed

2 files changed

+4
-1
lines changed

python/pyspark/streaming/context.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -153,6 +153,9 @@ def _testInputStream(self, test_inputs, numSlices=None):
153153
test_rdds.append(test_rdd._jrdd)
154154
test_rdd_deserializers.append(test_rdd._jrdd_deserializer)
155155

156+
# if len(set(test_rdd_deserializers)) > 1:
157+
# raise IOError("Deserializer should be one type to run test case. "
158+
# "See the SparkContext.parallelize to understand how to decide deserializer")
156159
jtest_rdds = ListConverter().convert(test_rdds, SparkContext._gateway._gateway_client)
157160
jinput_stream = self._jvm.PythonTestInputStream(self._jssc, jtest_rdds).asJavaDStream()
158161

streaming/src/main/scala/org/apache/spark/streaming/api/python/PythonDStream.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,7 @@ class PythonTransformedDStream(
210210
* This is an input stream just for the unit test. This is equivalent to a checkpointable,
211211
* replayable, reliable message queue like Kafka. It requires a sequence as input, and
212212
* returns the i_th element at the i_th batch under manual clock.
213-
* This implementation is close to QueStream
213+
* This implementation is inspired by QueueStream
214214
*/
215215

216216
class PythonTestInputStream(ssc_ : JavaStreamingContext, inputRDDs: JArrayList[JavaRDD[Array[Byte]]])

0 commit comments

Comments
 (0)