diff --git a/docs/configuration.md b/docs/configuration.md
index 64aa94f622afa..110fdb90ab427 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -852,6 +852,41 @@ Apart from these, the following properties are also available, and may be useful
between nodes leading to flooding the network with those.
+<tr>
+  <td><code>spark.shuffle.io.preferDirectBufs</code></td>
+  <td>true</td>
+  <td>
+    (Netty only) Off-heap buffers are used to reduce garbage collection during shuffle and cache
+    block transfer. For environments where off-heap memory is tightly limited, users may wish to
+    turn this off to force all allocations from Netty to be on-heap.
+  </td>
+</tr>
+<tr>
+  <td><code>spark.shuffle.io.numConnectionsPerPeer</code></td>
+  <td>1</td>
+  <td>
+    (Netty only) Connections between hosts are reused in order to reduce connection buildup for
+    large clusters. For clusters with many hard disks and few hosts, this may result in insufficient
+    concurrency to saturate all disks, and so users may consider increasing this value.
+  </td>
+</tr>
+<tr>
+  <td><code>spark.shuffle.io.maxRetries</code></td>
+  <td>3</td>
+  <td>
+    (Netty only) Fetches that fail due to IO-related exceptions are automatically retried if this is
+    set to a non-zero value. This retry logic helps stabilize large shuffles in the face of long GC
+    pauses or transient network connectivity issues.
+  </td>
+</tr>
+<tr>
+  <td><code>spark.shuffle.io.retryWait</code></td>
+  <td>5</td>
+  <td>
+    (Netty only) Seconds to wait between retries of fetches. The maximum delay caused by retrying
+    is simply <code>maxRetries * retryWait</code>, by default 15 seconds.
+  </td>
+</tr>
#### Scheduling
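
As a quick illustration (not part of the patch), the new keys documented above can be set on a
SparkConf like any other Spark property. The app name and the specific values below are arbitrary
examples, not recommended settings:

import org.apache.spark.SparkConf;

public class NettyShuffleTuningExample {
  public static void main(String[] args) {
    // Example values only; the defaults documented above are usually fine.
    SparkConf conf = new SparkConf()
        .setAppName("netty-shuffle-tuning-example")
        // Force Netty to allocate on-heap buffers when off-heap memory is constrained.
        .set("spark.shuffle.io.preferDirectBufs", "false")
        // Open more connections per peer to keep many local disks busy on small clusters.
        .set("spark.shuffle.io.numConnectionsPerPeer", "2")
        // Retry failed fetches up to 3 times, waiting 5 seconds between attempts.
        .set("spark.shuffle.io.maxRetries", "3")
        .set("spark.shuffle.io.retryWait", "5");
  }
}
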
diff --git a/network/common/src/main/java/org/apache/spark/network/util/TransportConf.java b/network/common/src/main/java/org/apache/spark/network/util/TransportConf.java
index 13b37f96f8ce2..7c9adf52af0f0 100644
--- a/network/common/src/main/java/org/apache/spark/network/util/TransportConf.java
+++ b/network/common/src/main/java/org/apache/spark/network/util/TransportConf.java
@@ -40,7 +40,7 @@ public int connectionTimeoutMs() {
return conf.getInt("spark.shuffle.io.connectionTimeout", 120) * 1000;
}
- /** Number of concurrent connections between two nodes for fetching data. **/
+ /** Number of concurrent connections between two nodes for fetching data. */
public int numConnectionsPerPeer() {
return conf.getInt("spark.shuffle.io.numConnectionsPerPeer", 1);
}
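
For intuition on how maxRetries and retryWait combine, here is a minimal, hypothetical retry loop
(a sketch, not Spark's actual fetch code): it sleeps retryWait seconds between up to maxRetries
re-attempts, so the extra delay it adds is at most maxRetries * retryWait, i.e. 15 seconds with the
defaults. The class and method names are invented for this example.

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

public class FetchRetryDemo {
  // Retries an IO action, sleeping retryWaitSeconds between attempts.
  // With maxRetries = 3 and retryWaitSeconds = 5, the added delay is bounded by 15 seconds.
  static <T> T fetchWithRetries(Callable<T> fetch, int maxRetries, int retryWaitSeconds)
      throws Exception {
    IOException lastFailure = null;
    for (int attempt = 0; attempt <= maxRetries; attempt++) {
      try {
        return fetch.call();
      } catch (IOException e) {
        lastFailure = e;
        if (attempt < maxRetries) {
          TimeUnit.SECONDS.sleep(retryWaitSeconds);
        }
      }
    }
    throw lastFailure;
  }
}
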