path: root/core
author     Reynold Xin <rxin@databricks.com>  2015-07-24 09:38:13 -0700
committer  Reynold Xin <rxin@databricks.com>  2015-07-24 09:38:13 -0700
commit     c8d71a4183dfc83ff257047857af0b6d66c6b90d (patch)
tree       43819dc841e884b7bf9906a33178c2695122e953 /core
parent     431ca39be51352dfcdacc87de7e64c2af313558d (diff)
download   spark-c8d71a4183dfc83ff257047857af0b6d66c6b90d.tar.gz
           spark-c8d71a4183dfc83ff257047857af0b6d66c6b90d.tar.bz2
           spark-c8d71a4183dfc83ff257047857af0b6d66c6b90d.zip
[SPARK-9305] Rename org.apache.spark.Row to Item.
It's a class used only in test cases, but it is named Row. Pretty annoying because every time I search for Row, it shows up before the Spark SQL Row, which is what a developer wants most of the time.

Author: Reynold Xin <rxin@databricks.com>

Closes #7638 from rxin/remove-row and squashes the following commits:

aeda52d [Reynold Xin] [SPARK-9305] Rename org.apache.spark.Row to Item.
Diffstat (limited to 'core')
-rw-r--r--  core/src/test/scala/org/apache/spark/PartitioningSuite.scala | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/core/src/test/scala/org/apache/spark/PartitioningSuite.scala b/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
index 3316f561a4..aa8028792c 100644
--- a/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
+++ b/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
@@ -91,13 +91,13 @@ class PartitioningSuite extends SparkFunSuite with SharedSparkContext with PrivateMethodTester
test("RangePartitioner for keys that are not Comparable (but with Ordering)") {
// Row does not extend Comparable, but has an implicit Ordering defined.
- implicit object RowOrdering extends Ordering[Row] {
- override def compare(x: Row, y: Row): Int = x.value - y.value
+ implicit object RowOrdering extends Ordering[Item] {
+ override def compare(x: Item, y: Item): Int = x.value - y.value
}
- val rdd = sc.parallelize(1 to 4500).map(x => (Row(x), Row(x)))
+ val rdd = sc.parallelize(1 to 4500).map(x => (Item(x), Item(x)))
val partitioner = new RangePartitioner(1500, rdd)
- partitioner.getPartition(Row(100))
+ partitioner.getPartition(Item(100))
}
test("RangPartitioner.sketch") {
@@ -252,4 +252,4 @@ class PartitioningSuite extends SparkFunSuite with SharedSparkContext with PrivateMethodTester
}
-private sealed case class Row(value: Int)
+private sealed case class Item(value: Int)
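
For context, here is a minimal, self-contained sketch of the pattern this test exercises; it is not part of the patch, and the object name and local SparkContext setup are illustrative assumptions. RangePartitioner only requires an implicit Ordering for the key type, so a plain case class such as Item works even though it does not extend Comparable.

    import org.apache.spark.{RangePartitioner, SparkConf, SparkContext}

    object ItemPartitioningSketch {
      // Hypothetical stand-in for the test helper: a simple key type that is not Comparable.
      case class Item(value: Int)

      // The implicit Ordering is what RangePartitioner uses to sample and sort keys.
      implicit object ItemOrdering extends Ordering[Item] {
        override def compare(x: Item, y: Item): Int = x.value - y.value
      }

      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("item-sketch"))
        try {
          // Pair RDD keyed by Item, mirroring the test above.
          val rdd = sc.parallelize(1 to 4500).map(x => (Item(x), Item(x)))
          // Constructing the partitioner samples the RDD's keys using ItemOrdering.
          val partitioner = new RangePartitioner(1500, rdd)
          // Look up the range bucket for a sample key.
          println(partitioner.getPartition(Item(100)))
        } finally {
          sc.stop()
        }
      }
    }

If Ordering[Item] were not in implicit scope, the RangePartitioner construction would not compile, which is the property the renamed test continues to cover.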