diff options
author | Subhobrata Dey <sbcd90@gmail.com> | 2016-05-10 12:32:56 -0700 |
---|---|---|
committer | Shixiong Zhu <shixiong@databricks.com> | 2016-05-10 12:32:56 -0700 |
commit | 89f73f674126bbc1cc101f0f5731b5750f1c90c8 (patch) | |
tree | e9bdd84d6bf0fba02ff1d2fb3a4e940fa514ed6a /sql/core | |
parent | 93353b0113158c87e09f0bad91a663a92e9cf1bc (diff) | |
download | spark-89f73f674126bbc1cc101f0f5731b5750f1c90c8.tar.gz spark-89f73f674126bbc1cc101f0f5731b5750f1c90c8.tar.bz2 spark-89f73f674126bbc1cc101f0f5731b5750f1c90c8.zip |
[SPARK-14642][SQL] import org.apache.spark.sql.expressions._ breaks udf under functions
## What changes were proposed in this pull request?
This PR fixes the import issue that breaks udf functions.
The following code snippet throws an error
```
scala> import org.apache.spark.sql.functions._
import org.apache.spark.sql.functions._
scala> import org.apache.spark.sql.expressions._
import org.apache.spark.sql.expressions._
scala> udf((v: String) => v.stripSuffix("-abc"))
<console>:30: error: No TypeTag available for String
udf((v: String) => v.stripSuffix("-abc"))
```
This PR resolves the issue.
## How was this patch tested?
The patch was tested with unit tests.
(If this patch involves UI changes, please attach a screenshot; otherwise, remove this)
Author: Subhobrata Dey <sbcd90@gmail.com>
Closes #12458 from sbcd90/udfFuncBreak.
Diffstat (limited to 'sql/core')
-rw-r--r-- | sql/core/src/main/java/org/apache/spark/sql/expressions/javalang/typed.java (renamed from sql/core/src/main/java/org/apache/spark/sql/expressions/java/typed.java) | 4 | ||||
-rw-r--r-- | sql/core/src/main/scala/org/apache/spark/sql/expressions/scalalang/typed.scala (renamed from sql/core/src/main/scala/org/apache/spark/sql/expressions/scala/typed.scala) | 4 | ||||
-rw-r--r-- | sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaDatasetAggregatorSuite.java | 2 | ||||
-rw-r--r-- | sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala | 2 | ||||
-rw-r--r-- | sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala | 2 | ||||
-rw-r--r-- | sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala | 2 | ||||
-rw-r--r-- | sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala | 2 |
7 files changed, 9 insertions, 9 deletions
diff --git a/sql/core/src/main/java/org/apache/spark/sql/expressions/java/typed.java b/sql/core/src/main/java/org/apache/spark/sql/expressions/javalang/typed.java index c7c6e3868f..247e94b86c 100644 --- a/sql/core/src/main/java/org/apache/spark/sql/expressions/java/typed.java +++ b/sql/core/src/main/java/org/apache/spark/sql/expressions/javalang/typed.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.spark.sql.expressions.java; +package org.apache.spark.sql.expressions.javalang; import org.apache.spark.annotation.Experimental; import org.apache.spark.api.java.function.MapFunction; @@ -29,7 +29,7 @@ import org.apache.spark.sql.execution.aggregate.TypedSumLong; * :: Experimental :: * Type-safe functions available for {@link org.apache.spark.sql.Dataset} operations in Java. * - * Scala users should use {@link org.apache.spark.sql.expressions.scala.typed}. + * Scala users should use {@link org.apache.spark.sql.expressions.scalalang.typed}. * * @since 2.0.0 */ diff --git a/sql/core/src/main/scala/org/apache/spark/sql/expressions/scala/typed.scala b/sql/core/src/main/scala/org/apache/spark/sql/expressions/scalalang/typed.scala index d0eb190afd..f46a4a7879 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/expressions/scala/typed.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/expressions/scalalang/typed.scala @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.spark.sql.expressions.scala +package org.apache.spark.sql.expressions.scalalang import org.apache.spark.annotation.Experimental import org.apache.spark.sql._ @@ -25,7 +25,7 @@ import org.apache.spark.sql.execution.aggregate._ * :: Experimental :: * Type-safe functions available for [[Dataset]] operations in Scala. * - * Java users should use [[org.apache.spark.sql.expressions.java.typed]]. + * Java users should use [[org.apache.spark.sql.expressions.javalang.typed]]. 
* * @since 2.0.0 */ diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaDatasetAggregatorSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaDatasetAggregatorSuite.java index 0e49f871de..f9842e130b 100644 --- a/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaDatasetAggregatorSuite.java +++ b/sql/core/src/test/java/test/org/apache/spark/sql/sources/JavaDatasetAggregatorSuite.java @@ -30,7 +30,7 @@ import org.apache.spark.sql.Encoder; import org.apache.spark.sql.Encoders; import org.apache.spark.sql.KeyValueGroupedDataset; import org.apache.spark.sql.expressions.Aggregator; -import org.apache.spark.sql.expressions.java.typed; +import org.apache.spark.sql.expressions.javalang.typed; /** * Suite for testing the aggregate functionality of Datasets in Java. diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala index b2a0f3d67e..f1585ca3ff 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala @@ -21,7 +21,7 @@ import scala.language.postfixOps import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder import org.apache.spark.sql.expressions.Aggregator -import org.apache.spark.sql.expressions.scala.typed +import org.apache.spark.sql.expressions.scalalang.typed import org.apache.spark.sql.functions._ import org.apache.spark.sql.test.SharedSQLContext diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala index d8e241c62f..4101e5c75b 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala @@ -19,7 +19,7 @@ package org.apache.spark.sql import org.apache.spark.{SparkConf, SparkContext} import 
org.apache.spark.sql.expressions.Aggregator -import org.apache.spark.sql.expressions.scala.typed +import org.apache.spark.sql.expressions.scalalang.typed import org.apache.spark.sql.functions._ import org.apache.spark.sql.types.StringType import org.apache.spark.util.Benchmark diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala index ada60f6919..f86955e5a5 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala @@ -20,7 +20,7 @@ package org.apache.spark.sql.execution import org.apache.spark.sql.Row import org.apache.spark.sql.execution.aggregate.TungstenAggregate import org.apache.spark.sql.execution.joins.BroadcastHashJoinExec -import org.apache.spark.sql.expressions.scala.typed +import org.apache.spark.sql.expressions.scalalang.typed import org.apache.spark.sql.functions.{avg, broadcast, col, max} import org.apache.spark.sql.test.SharedSQLContext import org.apache.spark.sql.types.{IntegerType, StringType, StructType} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala index 8da7742ffe..0f5fc9ca72 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala @@ -24,7 +24,7 @@ import org.apache.spark.sql.StreamTest import org.apache.spark.sql.catalyst.analysis.Update import org.apache.spark.sql.execution.streaming._ import org.apache.spark.sql.execution.streaming.state.StateStore -import org.apache.spark.sql.expressions.scala.typed +import org.apache.spark.sql.expressions.scalalang.typed import org.apache.spark.sql.functions._ import 
org.apache.spark.sql.test.SharedSQLContext |