diff options
author | Reynold Xin <rxin@databricks.com> | 2015-05-27 11:54:35 -0700 |
---|---|---|
committer | Reynold Xin <rxin@databricks.com> | 2015-05-27 11:54:42 -0700 |
commit | 0468d57a6fe42a7f06ccd4ac1faad59c4dcc4c68 (patch) | |
tree | a823601df5d06f8da0abd304c8b7d954368fd42e | |
parent | 13044b0460e866804e6e3f058ebe38c0d005c1ff (diff) | |
download | spark-0468d57a6fe42a7f06ccd4ac1faad59c4dcc4c68.tar.gz spark-0468d57a6fe42a7f06ccd4ac1faad59c4dcc4c68.tar.bz2 spark-0468d57a6fe42a7f06ccd4ac1faad59c4dcc4c68.zip |
Removed Guava dependency from JavaTypeInference's type signature.
This should also close #6243.
Author: Reynold Xin <rxin@databricks.com>
Closes #6431 from rxin/JavaTypeInference-guava and squashes the following commits:
e58df3c [Reynold Xin] Removed Guava dependency from JavaTypeInference's type signature.
(cherry picked from commit 6fec1a9409b34d8ce58ea1c330b52cc7ef3e7e7e)
Signed-off-by: Reynold Xin <rxin@databricks.com>
-rw-r--r-- | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala | 11 | ||||
-rw-r--r-- | sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala | 4 |
2 files changed, 11 insertions, 4 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala index 625c8d3a62..9a3f9694e4 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala @@ -39,11 +39,20 @@ private [sql] object JavaTypeInference { private val valuesReturnType = classOf[JMap[_, _]].getMethod("values").getGenericReturnType /** + * Infers the corresponding SQL data type of a JavaClean class. + * @param beanClass Java type + * @return (SQL data type, nullable) + */ + def inferDataType(beanClass: Class[_]): (DataType, Boolean) = { + inferDataType(TypeToken.of(beanClass)) + } + + /** * Infers the corresponding SQL data type of a Java type. * @param typeToken Java type * @return (SQL data type, nullable) */ - private [sql] def inferDataType(typeToken: TypeToken[_]): (DataType, Boolean) = { + private def inferDataType(typeToken: TypeToken[_]): (DataType, Boolean) = { // TODO: All of this could probably be moved to Catalyst as it is mostly not Spark specific. 
typeToken.getRawType match { case c: Class[_] if c.isAnnotationPresent(classOf[SQLUserDefinedType]) => diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala index 3935f7b321..15c30352be 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala @@ -27,8 +27,6 @@ import scala.language.implicitConversions import scala.reflect.runtime.universe.TypeTag import scala.util.control.NonFatal -import com.google.common.reflect.TypeToken - import org.apache.spark.SparkContext import org.apache.spark.annotation.{DeveloperApi, Experimental} import org.apache.spark.api.java.{JavaRDD, JavaSparkContext} @@ -1011,7 +1009,7 @@ class SQLContext(@transient val sparkContext: SparkContext) * Returns a Catalyst Schema for the given java bean class. */ protected def getSchema(beanClass: Class[_]): Seq[AttributeReference] = { - val (dataType, _) = JavaTypeInference.inferDataType(TypeToken.of(beanClass)) + val (dataType, _) = JavaTypeInference.inferDataType(beanClass) dataType.asInstanceOf[StructType].fields.map { f => AttributeReference(f.name, f.dataType, f.nullable)() } |