about | summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Marcelo Vanzin <vanzin@cloudera.com> 2016-12-13 10:02:19 -0800
committer: Marcelo Vanzin <vanzin@cloudera.com> 2016-12-13 10:02:19 -0800
commit: f280ccf449f62a00eb4042dfbcf7a0715850fd4c (patch)
tree: cf96b9cb526970ea6591bc6c73d9a8f829a2108e
parent: fb3081d3b38a50aa5e023c603e1b191e57f7c876 (diff)
download: spark-f280ccf449f62a00eb4042dfbcf7a0715850fd4c.tar.gz
spark-f280ccf449f62a00eb4042dfbcf7a0715850fd4c.tar.bz2
spark-f280ccf449f62a00eb4042dfbcf7a0715850fd4c.zip
[SPARK-18835][SQL] Don't expose Guava types in the JavaTypeInference API.
This avoids issues during maven tests because of shading. Author: Marcelo Vanzin <vanzin@cloudera.com> Closes #16260 from vanzin/SPARK-18835.
-rw-r--r-- sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala (12 lines changed)
-rw-r--r-- sql/core/src/main/scala/org/apache/spark/sql/UDFRegistration.scala (4 lines changed)
2 files changed, 12 insertions, 4 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala
index 7e8e4dab72..8b53d988cb 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala
@@ -19,6 +19,7 @@ package org.apache.spark.sql.catalyst
import java.beans.{Introspector, PropertyDescriptor}
import java.lang.{Iterable => JIterable}
+import java.lang.reflect.Type
import java.util.{Iterator => JIterator, List => JList, Map => JMap}
import scala.language.existentials
@@ -56,10 +57,19 @@ object JavaTypeInference {
/**
* Infers the corresponding SQL data type of a Java type.
+ * @param beanType Java type
+ * @return (SQL data type, nullable)
+ */
+ private[sql] def inferDataType(beanType: Type): (DataType, Boolean) = {
+ inferDataType(TypeToken.of(beanType))
+ }
+
+ /**
+ * Infers the corresponding SQL data type of a Java type.
* @param typeToken Java type
* @return (SQL data type, nullable)
*/
- private[sql] def inferDataType(typeToken: TypeToken[_]): (DataType, Boolean) = {
+ private def inferDataType(typeToken: TypeToken[_]): (DataType, Boolean) = {
typeToken.getRawType match {
case c: Class[_] if c.isAnnotationPresent(classOf[SQLUserDefinedType]) =>
(c.getAnnotation(classOf[SQLUserDefinedType]).udt().newInstance(), true)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/UDFRegistration.scala b/sql/core/src/main/scala/org/apache/spark/sql/UDFRegistration.scala
index c8be89c646..d94185b390 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/UDFRegistration.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/UDFRegistration.scala
@@ -23,8 +23,6 @@ import java.lang.reflect.{ParameterizedType, Type}
import scala.reflect.runtime.universe.TypeTag
import scala.util.Try
-import com.google.common.reflect.TypeToken
-
import org.apache.spark.annotation.InterfaceStability
import org.apache.spark.internal.Logging
import org.apache.spark.sql.api.java._
@@ -446,7 +444,7 @@ class UDFRegistration private[sql] (functionRegistry: FunctionRegistry) extends
val udfReturnType = udfInterfaces(0).getActualTypeArguments.last
var returnType = returnDataType
if (returnType == null) {
- returnType = JavaTypeInference.inferDataType(TypeToken.of(udfReturnType))._1
+ returnType = JavaTypeInference.inferDataType(udfReturnType)._1
}
udfInterfaces(0).getActualTypeArguments.length match {