@@ -131,7 +131,8 @@ df.show();

 <div data-lang="python" markdown="1">
 {% highlight python %}
-sqlContext <- sparkRSQL.init(sc)
+from pyspark.sql import SQLContext
+sqlContext = SQLContext(sc)

 df = sqlContext.jsonFile("examples/src/main/resources/people.json")

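For reference, the corrected Python block reads in full as below. This is a sketch assuming an existing SparkContext named `sc`, as the surrounding guide does:

{% highlight python %}
from pyspark.sql import SQLContext

# sc is an existing SparkContext
sqlContext = SQLContext(sc)

# Load a JSON dataset as a DataFrame and display its contents
df = sqlContext.jsonFile("examples/src/main/resources/people.json")
df.show()
{% endhighlight %}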
@@ -1179,14 +1180,10 @@ df3.printSchema()
 # sqlContext from the previous example is used in this example.

 # Create a simple DataFrame, stored into a partition directory
-rdd1 <- map(parallelize(sc, 1:5), function(i){ list(single = i, double = i * 2)})
-df1 <- createDataFrame(sqlContext, rdd1)
 saveDF(df1, "data/test_table/key=1", "parquet", "overwrite")

 # Create another DataFrame in a new partition directory,
 # adding a new column and dropping an existing column
-rdd2 <- map(parallelize(sc, 6:11), function(i){ list(single = i, triple = i * 3)})
-df2 <- createDataFrame(sqlContext, rdd2)
 saveDF(df2, "data/test_table/key=2", "parquet", "overwrite")

 # Read the partitioned table
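Since the RDD-based construction of `df1` and `df2` is removed from the R example, a PySpark analogue of the full partition-discovery flow may help. This is a sketch against the Spark 1.x SQLContext API, with the `data/test_table` path and column names taken from the example above:

{% highlight python %}
from pyspark.sql import Row, SQLContext

# sc is an existing SparkContext
sqlContext = SQLContext(sc)

# Create a simple DataFrame, stored into a partition directory
df1 = sqlContext.createDataFrame(
    sc.parallelize(range(1, 6)).map(lambda i: Row(single=i, double=i * 2)))
df1.save("data/test_table/key=1", "parquet", "overwrite")

# Create another DataFrame in a new partition directory,
# adding a new column and dropping an existing column
df2 = sqlContext.createDataFrame(
    sc.parallelize(range(6, 12)).map(lambda i: Row(single=i, triple=i * 3)))
df2.save("data/test_table/key=2", "parquet", "overwrite")

# Read the partitioned table; "key" is discovered as a partition column
df3 = sqlContext.parquetFile("data/test_table")
df3.printSchema()
{% endhighlight %}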