about summary refs log tree commit diff
path: root/sql/core/src/test
diff options
context:
space:
mode:
author: Reynold Xin <rxin@databricks.com> 2015-04-23 01:43:40 -0700
committer: Reynold Xin <rxin@databricks.com> 2015-04-23 01:43:40 -0700
commitf60bece14f98450b4a71b00d7b58525f06e1f9ed (patch)
tree7c60f98f401d49bb3069035f6f585f8648b982f7 /sql/core/src/test
parent29163c520087e89ca322521db1dd8656d86a6f0e (diff)
downloadspark-f60bece14f98450b4a71b00d7b58525f06e1f9ed.tar.gz
spark-f60bece14f98450b4a71b00d7b58525f06e1f9ed.tar.bz2
spark-f60bece14f98450b4a71b00d7b58525f06e1f9ed.zip
[SPARK-7069][SQL] Rename NativeType -> AtomicType.
Also renamed JvmType to InternalType. Author: Reynold Xin <rxin@databricks.com> Closes #5651 from rxin/native-to-atomic-type and squashes the following commits: cbd4028 [Reynold Xin] [SPARK-7069][SQL] Rename NativeType -> AtomicType.
Diffstat (limited to 'sql/core/src/test')
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnStatsSuite.scala6
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnTypeSuite.scala8
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnarTestUtils.scala6
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/DictionaryEncodingSuite.scala4
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/IntegralDeltaSuite.scala6
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/RunLengthEncodingSuite.scala4
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/TestCompressibleColumnBuilder.scala6
7 files changed, 20 insertions, 20 deletions
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnStatsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnStatsSuite.scala
index fec487f1d2..7cefcf4406 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnStatsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnStatsSuite.scala
@@ -34,7 +34,7 @@ class ColumnStatsSuite extends FunSuite {
testColumnStats(classOf[DateColumnStats], DATE, Row(Int.MaxValue, Int.MinValue, 0))
testColumnStats(classOf[TimestampColumnStats], TIMESTAMP, Row(null, null, 0))
- def testColumnStats[T <: NativeType, U <: ColumnStats](
+ def testColumnStats[T <: AtomicType, U <: ColumnStats](
columnStatsClass: Class[U],
columnType: NativeColumnType[T],
initialStatistics: Row): Unit = {
@@ -55,8 +55,8 @@ class ColumnStatsSuite extends FunSuite {
val rows = Seq.fill(10)(makeRandomRow(columnType)) ++ Seq.fill(10)(makeNullRow(1))
rows.foreach(columnStats.gatherStats(_, 0))
- val values = rows.take(10).map(_(0).asInstanceOf[T#JvmType])
- val ordering = columnType.dataType.ordering.asInstanceOf[Ordering[T#JvmType]]
+ val values = rows.take(10).map(_(0).asInstanceOf[T#InternalType])
+ val ordering = columnType.dataType.ordering.asInstanceOf[Ordering[T#InternalType]]
val stats = columnStats.collectedStatistics
assertResult(values.min(ordering), "Wrong lower bound")(stats(0))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnTypeSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnTypeSuite.scala
index b48bed1871..1e105e259d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnTypeSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnTypeSuite.scala
@@ -196,12 +196,12 @@ class ColumnTypeSuite extends FunSuite with Logging {
}
}
- def testNativeColumnType[T <: NativeType](
+ def testNativeColumnType[T <: AtomicType](
columnType: NativeColumnType[T],
- putter: (ByteBuffer, T#JvmType) => Unit,
- getter: (ByteBuffer) => T#JvmType): Unit = {
+ putter: (ByteBuffer, T#InternalType) => Unit,
+ getter: (ByteBuffer) => T#InternalType): Unit = {
- testColumnType[T, T#JvmType](columnType, putter, getter)
+ testColumnType[T, T#InternalType](columnType, putter, getter)
}
def testColumnType[T <: DataType, JvmType](
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnarTestUtils.scala b/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnarTestUtils.scala
index f76314b9da..75d993e563 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnarTestUtils.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/columnar/ColumnarTestUtils.scala
@@ -24,7 +24,7 @@ import scala.util.Random
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.GenericMutableRow
-import org.apache.spark.sql.types.{UTF8String, DataType, Decimal, NativeType}
+import org.apache.spark.sql.types.{UTF8String, DataType, Decimal, AtomicType}
object ColumnarTestUtils {
def makeNullRow(length: Int): GenericMutableRow = {
@@ -91,9 +91,9 @@ object ColumnarTestUtils {
row
}
- def makeUniqueValuesAndSingleValueRows[T <: NativeType](
+ def makeUniqueValuesAndSingleValueRows[T <: AtomicType](
columnType: NativeColumnType[T],
- count: Int): (Seq[T#JvmType], Seq[GenericMutableRow]) = {
+ count: Int): (Seq[T#InternalType], Seq[GenericMutableRow]) = {
val values = makeUniqueRandomValues(columnType, count)
val rows = values.map { value =>
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/DictionaryEncodingSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/DictionaryEncodingSuite.scala
index c82d979935..64b70552eb 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/DictionaryEncodingSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/DictionaryEncodingSuite.scala
@@ -24,14 +24,14 @@ import org.scalatest.FunSuite
import org.apache.spark.sql.catalyst.expressions.GenericMutableRow
import org.apache.spark.sql.columnar._
import org.apache.spark.sql.columnar.ColumnarTestUtils._
-import org.apache.spark.sql.types.NativeType
+import org.apache.spark.sql.types.AtomicType
class DictionaryEncodingSuite extends FunSuite {
testDictionaryEncoding(new IntColumnStats, INT)
testDictionaryEncoding(new LongColumnStats, LONG)
testDictionaryEncoding(new StringColumnStats, STRING)
- def testDictionaryEncoding[T <: NativeType](
+ def testDictionaryEncoding[T <: AtomicType](
columnStats: ColumnStats,
columnType: NativeColumnType[T]) {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/IntegralDeltaSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/IntegralDeltaSuite.scala
index 88011631ee..bfd99f143b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/IntegralDeltaSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/IntegralDeltaSuite.scala
@@ -33,7 +33,7 @@ class IntegralDeltaSuite extends FunSuite {
columnType: NativeColumnType[I],
scheme: CompressionScheme) {
- def skeleton(input: Seq[I#JvmType]) {
+ def skeleton(input: Seq[I#InternalType]) {
// -------------
// Tests encoder
// -------------
@@ -120,13 +120,13 @@ class IntegralDeltaSuite extends FunSuite {
case LONG => Seq(2: Long, 1: Long, 2: Long, 130: Long)
}
- skeleton(input.map(_.asInstanceOf[I#JvmType]))
+ skeleton(input.map(_.asInstanceOf[I#InternalType]))
}
test(s"$scheme: long random series") {
// Have to workaround with `Any` since no `ClassTag[I#JvmType]` available here.
val input = Array.fill[Any](10000)(makeRandomValue(columnType))
- skeleton(input.map(_.asInstanceOf[I#JvmType]))
+ skeleton(input.map(_.asInstanceOf[I#InternalType]))
}
}
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/RunLengthEncodingSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/RunLengthEncodingSuite.scala
index 08df1db375..fde7a4595b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/RunLengthEncodingSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/RunLengthEncodingSuite.scala
@@ -22,7 +22,7 @@ import org.scalatest.FunSuite
import org.apache.spark.sql.catalyst.expressions.GenericMutableRow
import org.apache.spark.sql.columnar._
import org.apache.spark.sql.columnar.ColumnarTestUtils._
-import org.apache.spark.sql.types.NativeType
+import org.apache.spark.sql.types.AtomicType
class RunLengthEncodingSuite extends FunSuite {
testRunLengthEncoding(new NoopColumnStats, BOOLEAN)
@@ -32,7 +32,7 @@ class RunLengthEncodingSuite extends FunSuite {
testRunLengthEncoding(new LongColumnStats, LONG)
testRunLengthEncoding(new StringColumnStats, STRING)
- def testRunLengthEncoding[T <: NativeType](
+ def testRunLengthEncoding[T <: AtomicType](
columnStats: ColumnStats,
columnType: NativeColumnType[T]) {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/TestCompressibleColumnBuilder.scala b/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/TestCompressibleColumnBuilder.scala
index fc8ff3b41d..5268dfe0aa 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/TestCompressibleColumnBuilder.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/columnar/compression/TestCompressibleColumnBuilder.scala
@@ -18,9 +18,9 @@
package org.apache.spark.sql.columnar.compression
import org.apache.spark.sql.columnar._
-import org.apache.spark.sql.types.NativeType
+import org.apache.spark.sql.types.AtomicType
-class TestCompressibleColumnBuilder[T <: NativeType](
+class TestCompressibleColumnBuilder[T <: AtomicType](
override val columnStats: ColumnStats,
override val columnType: NativeColumnType[T],
override val schemes: Seq[CompressionScheme])
@@ -32,7 +32,7 @@ class TestCompressibleColumnBuilder[T <: NativeType](
}
object TestCompressibleColumnBuilder {
- def apply[T <: NativeType](
+ def apply[T <: AtomicType](
columnStats: ColumnStats,
columnType: NativeColumnType[T],
scheme: CompressionScheme): TestCompressibleColumnBuilder[T] = {