aboutsummaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorReynold Xin <rxin@databricks.com>2015-05-27 11:54:35 -0700
committerReynold Xin <rxin@databricks.com>2015-05-27 11:54:35 -0700
commit6fec1a9409b34d8ce58ea1c330b52cc7ef3e7e7e (patch)
treec49721a726d4ab1aa019bb1cf8d8ec4616c0cdb7 /sql
parent0db76c90ad5f84d7a5640c41de74876b906ddc90 (diff)
downloadspark-6fec1a9409b34d8ce58ea1c330b52cc7ef3e7e7e.tar.gz
spark-6fec1a9409b34d8ce58ea1c330b52cc7ef3e7e7e.tar.bz2
spark-6fec1a9409b34d8ce58ea1c330b52cc7ef3e7e7e.zip
Removed Guava dependency from JavaTypeInference's type signature.
This should also close #6243. Author: Reynold Xin <rxin@databricks.com> Closes #6431 from rxin/JavaTypeInference-guava and squashes the following commits: e58df3c [Reynold Xin] Removed Guava dependency from JavaTypeInference's type signature.
Diffstat (limited to 'sql')
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala11
-rw-r--r--sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala4
2 files changed, 11 insertions, 4 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala
index 625c8d3a62..9a3f9694e4 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala
@@ -39,11 +39,20 @@ private [sql] object JavaTypeInference {
private val valuesReturnType = classOf[JMap[_, _]].getMethod("values").getGenericReturnType
/**
+ * Infers the corresponding SQL data type of a Java Bean class.
+ * @param beanClass Java type
+ * @return (SQL data type, nullable)
+ */
+ def inferDataType(beanClass: Class[_]): (DataType, Boolean) = {
+ inferDataType(TypeToken.of(beanClass))
+ }
+
+ /**
* Infers the corresponding SQL data type of a Java type.
* @param typeToken Java type
* @return (SQL data type, nullable)
*/
- private [sql] def inferDataType(typeToken: TypeToken[_]): (DataType, Boolean) = {
+ private def inferDataType(typeToken: TypeToken[_]): (DataType, Boolean) = {
// TODO: All of this could probably be moved to Catalyst as it is mostly not Spark specific.
typeToken.getRawType match {
case c: Class[_] if c.isAnnotationPresent(classOf[SQLUserDefinedType]) =>
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index 3935f7b321..15c30352be 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -27,8 +27,6 @@ import scala.language.implicitConversions
import scala.reflect.runtime.universe.TypeTag
import scala.util.control.NonFatal
-import com.google.common.reflect.TypeToken
-
import org.apache.spark.SparkContext
import org.apache.spark.annotation.{DeveloperApi, Experimental}
import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}
@@ -1011,7 +1009,7 @@ class SQLContext(@transient val sparkContext: SparkContext)
* Returns a Catalyst Schema for the given java bean class.
*/
protected def getSchema(beanClass: Class[_]): Seq[AttributeReference] = {
- val (dataType, _) = JavaTypeInference.inferDataType(TypeToken.of(beanClass))
+ val (dataType, _) = JavaTypeInference.inferDataType(beanClass)
dataType.asInstanceOf[StructType].fields.map { f =>
AttributeReference(f.name, f.dataType, f.nullable)()
}