Commit a02679c

Fix scaladoc errors from some recent doc changes, where missing links were generating build warnings. We apparently can't generate links outside the module.
1 parent: b2c6a09
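
For context, here is the pattern applied throughout this commit, sketched with hypothetical names (LocalThing and RemoteThing are placeholders, not Spark classes): scaladoc resolves [[...]] links only against symbols compiled in the same module, so cross-module references are demoted to plain `monospace` text, which renders as code but attempts no link resolution.

    // Hypothetical sketch of the fix; LocalThing and RemoteThing are
    // placeholder names, not classes from the Spark source tree.

    /** A symbol compiled in this module; [[...]] links to it resolve. */
    class LocalThing

    // Before (scaladoc emits a warning like "Could not find any member to
    // link for ..." when the target lives outside this module):
    //   /** See [[other.module.RemoteThing]] for more documentation. */
    //
    // After (the fix: plain monospace, no link resolution attempted):
    //   /** See `RemoteThing` for more documentation. */

    /** Same-module links still work: see [[LocalThing]]. */
    class UsesLinks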

7 files changed: +9, −9 lines

mllib/src/main/scala/org/apache/spark/mllib/linalg/SVD.scala
Lines changed: 1 addition & 1 deletion

@@ -73,7 +73,7 @@ class SVD {
    * U is a row-by-row dense matrix
    * S is a simple double array of singular values
    * V is a 2d array matrix
-   * See [[denseSVD]] for more documentation
+   * See `denseSVD` for more documentation
    */
   def compute(matrix: RDD[Array[Double]]):
       (RDD[Array[Double]], Array[Double], Array[Array[Double]]) = {

sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
Lines changed: 3 additions & 3 deletions

@@ -70,8 +70,8 @@ case class Exchange(newPartitioning: Partitioning, child: SparkPlan) extends Una
 }

 /**
- * Ensures that the [[catalyst.plans.physical.Partitioning Partitioning]] of input data meets the
- * [[catalyst.plans.physical.Distribution Distribution]] requirements for each operator by inserting
+ * Ensures that the Partitioning of input data meets the
+ * Distribution requirements for each operator by inserting
  * [[Exchange]] Operators where required.
  */
 object AddExchange extends Rule[SparkPlan] {

@@ -133,4 +133,4 @@ object AddExchange extends Rule[SparkPlan] {
       operator.withNewChildren(repartitionedChildren)
     }
   }
-}
+}

sql/core/src/main/scala/org/apache/spark/sql/execution/Generate.scala
Lines changed: 1 addition & 1 deletion

@@ -20,7 +20,7 @@ package org.apache.spark.sql.execution
 import org.apache.spark.sql.catalyst.expressions.{Generator, JoinedRow, Literal, Projection}

 /**
- * Applies a [[catalyst.expressions.Generator Generator]] to a stream of input rows, combining the
+ * Applies a Generator to a stream of input rows, combining the
  * output of each into a new stream of rows. This operation is similar to a `flatMap` in functional
  * programming with one important additional feature, which allows the input rows to be joined with
  * their output.

sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
Lines changed: 1 addition & 1 deletion

@@ -30,7 +30,7 @@ import org.apache.hadoop.io.Writable

 /**
  * Internal helper class that saves an RDD using a Hive OutputFormat.
- * It is based on [[SparkHadoopWriter]].
+ * It is based on SparkHadoopWriter.
  */
 protected[spark]
 class SparkHiveHadoopWriter(

sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
Lines changed: 1 addition & 1 deletion

@@ -137,7 +137,7 @@ class HiveMetastoreCatalog(hive: HiveContext) extends Catalog with Logging {

   /**
    * UNIMPLEMENTED: It needs to be decided how we will persist in-memory tables to the metastore.
-   * For now, if this functionality is desired mix in the in-memory [[OverrideCatalog]].
+   * For now, if this functionality is desired mix in the in-memory OverrideCatalog.
    */
   override def registerTable(
       databaseName: Option[String], tableName: String, plan: LogicalPlan): Unit = ???

sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
Lines changed: 1 addition & 1 deletion

@@ -122,7 +122,7 @@ object HiveQl {

   /**
    * A set of implicit transformations that allow Hive ASTNodes to be rewritten by transformations
-   * similar to [[catalyst.trees.TreeNode]].
+   * similar to TreeNode.
    *
    * Note that this should be considered very experimental and is not indented as a replacement
    * for TreeNode. Primarily it should be noted ASTNodes are not immutable and do not appear to

sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
Lines changed: 1 addition & 1 deletion

@@ -359,7 +359,7 @@ case class HiveGenericUdaf(

 /**
  * Converts a Hive Generic User Defined Table Generating Function (UDTF) to a
- * [[catalyst.expressions.Generator Generator]]. Note that the semantics of Generators do not allow
+ * Generator. Note that the semantics of Generators do not allow
  * Generators to maintain state in between input rows. Thus UDTFs that rely on partitioning
  * dependent operations like calls to `close()` before producing output will not operate the same as
  * in Hive. However, in practice this should not affect compatibility for most sane UDTFs
