about summary refs log tree commit diff
diff options
context:
space:
mode:
authorReynold Xin <rxin@databricks.com>2016-04-02 00:00:19 -0700
committerReynold Xin <rxin@databricks.com>2016-04-02 00:00:19 -0700
commit67d753516da9b6318cd4001bb7ae91703aaf098d (patch)
tree2b3b6cc724300251ed100e230eaa3c78c0bb7bc1
parentd7982a3a9aa804e7e3a2004335e7f314867a5f8a (diff)
downloadspark-67d753516da9b6318cd4001bb7ae91703aaf098d.tar.gz
spark-67d753516da9b6318cd4001bb7ae91703aaf098d.tar.bz2
spark-67d753516da9b6318cd4001bb7ae91703aaf098d.zip
[HOTFIX] Fix compilation break.
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala1
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala8
2 files changed, 4 insertions, 5 deletions
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala
index d5db9db36b..1328142704 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala
@@ -21,6 +21,7 @@ import java.io.{File, FileNotFoundException, IOException}
import java.net.URI
import java.util.ConcurrentModificationException
+import scala.language.implicitConversions
import scala.util.Random
import org.apache.hadoop.conf.Configuration
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala
index b63ce89d18..3af7c01e52 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala
@@ -18,8 +18,9 @@
package org.apache.spark.sql.streaming
import org.apache.spark.SparkException
-import org.apache.spark.sql.{Encoder, StreamTest, SumOf, TypedColumn}
+import org.apache.spark.sql.StreamTest
import org.apache.spark.sql.execution.streaming._
+import org.apache.spark.sql.expressions.scala.typed
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSQLContext
@@ -118,11 +119,8 @@ class StreamingAggregationSuite extends StreamTest with SharedSQLContext {
}
test("typed aggregators") {
- def sum[I, N : Numeric : Encoder](f: I => N): TypedColumn[I, N] =
- new SumOf(f).toColumn
-
val inputData = MemoryStream[(String, Int)]
- val aggregated = inputData.toDS().groupByKey(_._1).agg(sum(_._2))
+ val aggregated = inputData.toDS().groupByKey(_._1).agg(typed.sumLong(_._2))
testStream(aggregated)(
AddData(inputData, ("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)),