Skip to content

Commit 3f7b2d8

Browse files
committed
Initialize converters lazily so that the attributes are resolved first
1 parent 6ad0ebb commit 3f7b2d8

File tree

1 file changed: +3 additions, -12 deletions
  • sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions

1 file changed: +3 additions, -12 deletions

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/generators.scala

Lines changed: 3 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -17,13 +17,10 @@
1717

1818
package org.apache.spark.sql.catalyst.expressions
1919

20-
import java.io.{ObjectInputStream, IOException}
21-
2220
import scala.collection.Map
2321

2422
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, trees}
2523
import org.apache.spark.sql.types._
26-
import org.apache.spark.util.Utils
2724

2825
/**
2926
* An expression that produces zero or more rows given a single input row.
@@ -85,16 +82,10 @@ case class UserDefinedGenerator(
8582
}.asInstanceOf[(Row => Row)]
8683
}
8784

88-
initializeConverters()
89-
90-
@throws(classOf[IOException])
91-
private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
92-
ois.defaultReadObject()
93-
initializeConverters()
94-
}
95-
9685
override def eval(input: Row): TraversableOnce[Row] = {
97-
// TODO(davies): improve this
86+
if (inputRow == null) {
87+
initializeConverters()
88+
}
9889
// Convert the objects into Scala Type before calling function, we need schema to support UDT
9990
function(convertToScala(inputRow(input)))
10091
}

0 commit comments

Comments (0)