Commit 45d7bce

DOCSP-31213 - streaming config (#183)
(cherry picked from commit 7ab9ace)
1 parent 19b7157 commit 45d7bce

48 files changed: +2030 -1523 lines

config/redirects

Lines changed: 32 additions & 0 deletions
@@ -33,3 +33,35 @@ raw: ${prefix}/sparkR -> ${base}/current/r-api/
 (v1.1-*]: ${prefix}/${version}/spark-sql -> ${base}/${version}/
 (v1.1-*]: ${prefix}/${version}/sparkR -> ${base}/${version}/r-api/
 [*-v2.0]: ${prefix}/${version}/release-notes -> ${base}/${version}/
+
+[v10.0-*]: ${prefix}/${version}/java-api -> ${base}/${version}/
+[v10.0-*]: ${prefix}/${version}/python-api -> ${base}/${version}/
+[v10.0-*]: ${prefix}/${version}/r-api -> ${base}/${version}/
+[v10.0-*]: ${prefix}/${version}/scala-api -> ${base}/${version}/
+[v10.0-*]: ${prefix}/${version}/python/filters-and-sql -> ${base}/${version}/
+[v10.0-*]: ${prefix}/${version}/r/filters-and-sql -> ${base}/${version}/
+[v10.0-*]: ${prefix}/${version}/scala/datasets-and-sql -> ${base}/${version}/
+[v10.0-*]: ${prefix}/${version}/java/datasets-and-sql -> ${base}/${version}/
+[v10.0-*]: ${prefix}/${version}/java/aggregation -> ${base}/${version}/
+[v10.0-*]: ${prefix}/${version}/r/aggregation -> ${base}/${version}/
+[v10.0-*]: ${prefix}/${version}/python/aggregation -> ${base}/${version}/
+[v10.0-*]: ${prefix}/${version}/scala/aggregation -> ${base}/${version}/
+[v10.0-*]: ${prefix}/${version}/scala/read-from-mongodb -> ${base}/${version}/read-from-mongodb/
+[v10.0-*]: ${prefix}/${version}/r/read-from-mongodb -> ${base}/${version}/read-from-mongodb/
+[v10.0-*]: ${prefix}/${version}/python/read-from-mongodb -> ${base}/${version}/read-from-mongodb/
+[v10.0-*]: ${prefix}/${version}/java/read-from-mongodb -> ${base}/${version}/read-from-mongodb/
+[v10.0-*]: ${prefix}/${version}/java/write-to-mongodb -> ${base}/${version}/write-to-mongodb/
+[v10.0-*]: ${prefix}/${version}/scala/write-to-mongodb -> ${base}/${version}/write-to-mongodb/
+[v10.0-*]: ${prefix}/${version}/r/write-to-mongodb -> ${base}/${version}/write-to-mongodb/
+[v10.0-*]: ${prefix}/${version}/python/write-to-mongodb -> ${base}/${version}/write-to-mongodb/
+[v10.0-*]: ${prefix}/${version}/scala/streaming -> ${base}/${version}/structured-streaming/
+[*-v3.0]: ${prefix}/${version}/configuration/write -> ${base}/${version}/
+[*-v3.0]: ${prefix}/${version}/configuration/read -> ${base}/${version}/
+[*-v3.0]: ${prefix}/${version}/write-to-mongodb -> ${base}/${version}/
+[*-v3.0]: ${prefix}/${version}/read-from-mongodb -> ${base}/${version}/
+[*-v3.0]: ${prefix}/${version}/structured-streaming -> ${base}/${version}/
+[v10.0-*]: ${prefix}/${version}/configuration/write -> ${base}/${version}/batch-mode/batch-write-config/
+[v10.0-*]: ${prefix}/${version}/configuration/read -> ${base}/${version}/batch-mode/batch-read-config/
+[v10.0-*]: ${prefix}/${version}/write-to-mongodb -> ${base}/${version}/batch-mode/batch-write/
+[v10.0-*]: ${prefix}/${version}/read-from-mongodb -> ${base}/${version}/batch-mode/batch-read/
+[v10.0-*]: ${prefix}/${version}/structured-streaming -> ${base}/${version}/streaming-mode/
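
Each rule maps an old URL pattern to its new home: the bracketed selector (for example, [v10.0-*] or [*-v3.0]) limits the rule to a range of doc versions, and ${prefix}, ${version}, and ${base} are expanded from the site's redirect configuration. A minimal sketch of that expansion in Python, using hypothetical values for all three variables:

    from string import Template

    # Hypothetical values standing in for the real site configuration.
    values = {
        "prefix": "docs/spark-connector",
        "base": "https://www.mongodb.com/docs/spark-connector",
        "version": "v10.1",  # a version matched by the [v10.0-*] selector
    }

    # One of the new rules: old structured-streaming page -> streaming-mode landing page.
    old = Template("${prefix}/${version}/structured-streaming").substitute(values)
    new = Template("${base}/${version}/streaming-mode/").substitute(values)

    print(old, "->", new)
    # docs/spark-connector/v10.1/structured-streaming ->
    # https://www.mongodb.com/docs/spark-connector/v10.1/streaming-mode/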

snooty.toml

Lines changed: 9 additions & 1 deletion
@@ -3,7 +3,15 @@ title = "MongoDB Spark Connector"
 
 intersphinx = ["https://www.mongodb.com/docs/manual/objects.inv"]
 
-toc_landing_pages = ["configuration"]
+toc_landing_pages = [
+    "configuration",
+    "/batch-mode",
+    "/streaming-mode",
+    "/streaming-mode/streaming-read",
+    "/streaming-mode/streaming-write",
+    "/batch-mode/batch-write",
+    "/batch-mode/batch-read",
+]
 
 [constants]
 connector-short = "Spark Connector"
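
For context: snooty.toml configures the Snooty docs build, and toc_landing_pages names pages that serve as landing pages for their table-of-contents subtrees. This change registers the new batch-mode and streaming-mode parent pages (and their read/write children) so their child pages render beneath them rather than standing alone.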

source/batch-mode.txt

Lines changed: 32 additions & 0 deletions
@@ -0,0 +1,32 @@
+==========
+Batch Mode
+==========
+
+.. contents:: On this page
+   :local:
+   :backlinks: none
+   :depth: 1
+   :class: singlecol
+
+.. toctree::
+
+   /batch-mode/batch-read
+   /batch-mode/batch-write
+
+Overview
+--------
+
+In batch mode, you can use the Spark Dataset and DataFrame APIs to process data at
+a specified time interval.
+
+The following sections show you how to use the {+connector-short+} to read data from
+MongoDB and write data to MongoDB in batch mode:
+
+- :ref:`batch-read-from-mongodb`
+- :ref:`batch-write-to-mongodb`
+
+.. tip:: Apache Spark Documentation
+
+   To learn more about using Spark to process batches of data, see the
+   `Spark Programming Guide
+   <https://spark.apache.org/docs/latest/sql-programming-guide.html>`__.
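
To make the new page's overview concrete, here is a minimal batch-mode sketch in PySpark. It assumes Spark Connector 10.x (the "mongodb" data source) and uses a hypothetical connection string, database, and collection names:

    from pyspark.sql import SparkSession

    # Hypothetical connection string; replace with your deployment's URI.
    uri = "mongodb://localhost:27017"

    spark = (
        SparkSession.builder.appName("batch-mode-sketch")
        .config("spark.mongodb.read.connection.uri", uri)
        .config("spark.mongodb.write.connection.uri", uri)
        .getOrCreate()
    )

    # Batch read: load a whole collection into a DataFrame.
    df = (
        spark.read.format("mongodb")
        .option("database", "test")        # hypothetical database name
        .option("collection", "people")    # hypothetical collection name
        .load()
    )

    # Batch write: append the DataFrame to another collection.
    (
        df.write.format("mongodb")
        .mode("append")
        .option("database", "test")
        .option("collection", "people_archive")
        .save()
    )

Run it with the connector package on the classpath, for example via spark-submit --packages org.mongodb.spark:mongo-spark-connector_2.12:10.1.1.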
