author    Reynold Xin <rxin@databricks.com>  2014-12-31 17:07:47 -0800
committer Reynold Xin <rxin@databricks.com>  2014-12-31 17:07:47 -0800
commit    7749dd6c36a182478b20f4636734c8db0b7ddb00 (patch)
tree      2466b0a5c668cf6579a9d982241da097033621f9
parent    4bb12488d56ea651c56d9688996b464b99095582 (diff)
[SPARK-5038] Add explicit return type for implicit functions.
As we learned in #3580, not explicitly typing implicit functions can lead to compiler bugs and potentially unexpected runtime behavior. This is a follow-up PR for the rest of Spark (outside Spark SQL). The original PR for Spark SQL can be found at https://github.com/apache/spark/pull/3859

Author: Reynold Xin <rxin@databricks.com>

Closes #3860 from rxin/implicit and squashes the following commits:

73702f9 [Reynold Xin] [SPARK-5038] Add explicit return type for implicit functions.
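For background, a minimal sketch of the issue (the names Wrapper, Before, and After are illustrative, not from this patch): when an implicit def's result type is inferred from a body that instantiates an anonymous subclass, the compiler infers the anonymous refinement type rather than the parent trait, which is exactly the kind of inference SPARK-5038 removes.

    trait Wrapper { def value: Int }

    object Before {
      // Inferred result type is the anonymous refinement Wrapper{...},
      // not Wrapper itself; implicits typed this way are what this
      // patch eliminates.
      implicit def wrap(i: Int) = new Wrapper { def value: Int = i }
    }

    object After {
      // Explicit return type: the implicit's signature no longer
      // depends on inference over its body.
      implicit def wrap(i: Int): Wrapper = new Wrapper { def value: Int = i }
    }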
-rw-r--r--  core/src/main/scala/org/apache/spark/SparkContext.scala                          | 14
-rw-r--r--  core/src/main/scala/org/apache/spark/util/Vector.scala                           | 38
-rw-r--r--  graphx/src/main/scala/org/apache/spark/graphx/impl/EdgePartitionBuilder.scala    | 63
-rw-r--r--  graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala |  4
-rw-r--r--  graphx/src/main/scala/org/apache/spark/graphx/impl/VertexPartition.scala         |  4
-rw-r--r--  graphx/src/main/scala/org/apache/spark/graphx/impl/VertexPartitionBaseOps.scala  |  4
6 files changed, 64 insertions(+), 63 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 57bc3d4e4a..df1cb3cda2 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -1708,19 +1708,19 @@ object SparkContext extends Logging {
// Implicit conversions to common Writable types, for saveAsSequenceFile
- implicit def intToIntWritable(i: Int) = new IntWritable(i)
+ implicit def intToIntWritable(i: Int): IntWritable = new IntWritable(i)
- implicit def longToLongWritable(l: Long) = new LongWritable(l)
+ implicit def longToLongWritable(l: Long): LongWritable = new LongWritable(l)
- implicit def floatToFloatWritable(f: Float) = new FloatWritable(f)
+ implicit def floatToFloatWritable(f: Float): FloatWritable = new FloatWritable(f)
- implicit def doubleToDoubleWritable(d: Double) = new DoubleWritable(d)
+ implicit def doubleToDoubleWritable(d: Double): DoubleWritable = new DoubleWritable(d)
- implicit def boolToBoolWritable (b: Boolean) = new BooleanWritable(b)
+ implicit def boolToBoolWritable (b: Boolean): BooleanWritable = new BooleanWritable(b)
- implicit def bytesToBytesWritable (aob: Array[Byte]) = new BytesWritable(aob)
+ implicit def bytesToBytesWritable (aob: Array[Byte]): BytesWritable = new BytesWritable(aob)
- implicit def stringToText(s: String) = new Text(s)
+ implicit def stringToText(s: String): Text = new Text(s)
private implicit def arrayToArrayWritable[T <% Writable: ClassTag](arr: Traversable[T])
: ArrayWritable = {
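As a usage sketch (assuming a live SparkContext named sc; the path is illustrative), these conversions are what let plain Int/String pairs be written with saveAsSequenceFile:

    import org.apache.spark.SparkContext._  // brings the Writable implicits into scope

    // Sketch: (Int, String) keys/values are lifted to (IntWritable, Text)
    // through intToIntWritable and stringToText when the call resolves.
    val pairs = sc.parallelize(Seq((1, "a"), (2, "b")))
    pairs.saveAsSequenceFile("/tmp/example-seqfile")  // hypothetical path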
diff --git a/core/src/main/scala/org/apache/spark/util/Vector.scala b/core/src/main/scala/org/apache/spark/util/Vector.scala
index c6cab82c3e..2ed827eab4 100644
--- a/core/src/main/scala/org/apache/spark/util/Vector.scala
+++ b/core/src/main/scala/org/apache/spark/util/Vector.scala
@@ -24,9 +24,9 @@ import org.apache.spark.util.random.XORShiftRandom
@deprecated("Use Vectors.dense from Spark's mllib.linalg package instead.", "1.0.0")
class Vector(val elements: Array[Double]) extends Serializable {
- def length = elements.length
+ def length: Int = elements.length
- def apply(index: Int) = elements(index)
+ def apply(index: Int): Double = elements(index)
def + (other: Vector): Vector = {
if (length != other.length) {
@@ -35,7 +35,7 @@ class Vector(val elements: Array[Double]) extends Serializable {
Vector(length, i => this(i) + other(i))
}
- def add(other: Vector) = this + other
+ def add(other: Vector): Vector = this + other
def - (other: Vector): Vector = {
if (length != other.length) {
@@ -44,7 +44,7 @@ class Vector(val elements: Array[Double]) extends Serializable {
Vector(length, i => this(i) - other(i))
}
- def subtract(other: Vector) = this - other
+ def subtract(other: Vector): Vector = this - other
def dot(other: Vector): Double = {
if (length != other.length) {
@@ -93,19 +93,19 @@ class Vector(val elements: Array[Double]) extends Serializable {
this
}
- def addInPlace(other: Vector) = this +=other
+ def addInPlace(other: Vector): Vector = this += other
def * (scale: Double): Vector = Vector(length, i => this(i) * scale)
- def multiply (d: Double) = this * d
+ def multiply (d: Double): Vector = this * d
def / (d: Double): Vector = this * (1 / d)
- def divide (d: Double) = this / d
+ def divide (d: Double): Vector = this / d
- def unary_- = this * -1
+ def unary_- : Vector = this * -1
- def sum = elements.reduceLeft(_ + _)
+ def sum: Double = elements.reduceLeft(_ + _)
def squaredDist(other: Vector): Double = {
var ans = 0.0
@@ -119,40 +119,40 @@ class Vector(val elements: Array[Double]) extends Serializable {
def dist(other: Vector): Double = math.sqrt(squaredDist(other))
- override def toString = elements.mkString("(", ", ", ")")
+ override def toString: String = elements.mkString("(", ", ", ")")
}
object Vector {
- def apply(elements: Array[Double]) = new Vector(elements)
+ def apply(elements: Array[Double]): Vector = new Vector(elements)
- def apply(elements: Double*) = new Vector(elements.toArray)
+ def apply(elements: Double*): Vector = new Vector(elements.toArray)
def apply(length: Int, initializer: Int => Double): Vector = {
val elements: Array[Double] = Array.tabulate(length)(initializer)
new Vector(elements)
}
- def zeros(length: Int) = new Vector(new Array[Double](length))
+ def zeros(length: Int): Vector = new Vector(new Array[Double](length))
- def ones(length: Int) = Vector(length, _ => 1)
+ def ones(length: Int): Vector = Vector(length, _ => 1)
/**
* Creates this [[org.apache.spark.util.Vector]] of given length containing random numbers
* between 0.0 and 1.0. Optional scala.util.Random number generator can be provided.
*/
- def random(length: Int, random: Random = new XORShiftRandom()) =
+ def random(length: Int, random: Random = new XORShiftRandom()): Vector =
Vector(length, _ => random.nextDouble())
class Multiplier(num: Double) {
- def * (vec: Vector) = vec * num
+ def * (vec: Vector): Vector = vec * num
}
- implicit def doubleToMultiplier(num: Double) = new Multiplier(num)
+ implicit def doubleToMultiplier(num: Double): Multiplier = new Multiplier(num)
implicit object VectorAccumParam extends org.apache.spark.AccumulatorParam[Vector] {
- def addInPlace(t1: Vector, t2: Vector) = t1 + t2
+ def addInPlace(t1: Vector, t2: Vector): Vector = t1 + t2
- def zero(initialValue: Vector) = Vector.zeros(initialValue.length)
+ def zero(initialValue: Vector): Vector = Vector.zeros(initialValue.length)
}
}
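The Multiplier and doubleToMultiplier pair above exists so that a scalar can appear on the left of *; a short sketch of the call sites this supports (the values are illustrative):

    import org.apache.spark.util.Vector

    val v = Vector(1.0, 2.0, 3.0)
    val a = v * 2.0     // Vector's own * method
    val b = 2.0 * v     // Double lifted to Multiplier via doubleToMultiplier
    val c = -v / v.sum  // unary_- and / both now carry explicit Vector return types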
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/impl/EdgePartitionBuilder.scala b/graphx/src/main/scala/org/apache/spark/graphx/impl/EdgePartitionBuilder.scala
index 409cf60977..906d42328f 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/impl/EdgePartitionBuilder.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/impl/EdgePartitionBuilder.scala
@@ -129,44 +129,45 @@ private[impl] case class EdgeWithLocalIds[@specialized ED](
srcId: VertexId, dstId: VertexId, localSrcId: Int, localDstId: Int, attr: ED)
private[impl] object EdgeWithLocalIds {
- implicit def lexicographicOrdering[ED] = new Ordering[EdgeWithLocalIds[ED]] {
- override def compare(a: EdgeWithLocalIds[ED], b: EdgeWithLocalIds[ED]): Int = {
- if (a.srcId == b.srcId) {
- if (a.dstId == b.dstId) 0
- else if (a.dstId < b.dstId) -1
+ implicit def lexicographicOrdering[ED]: Ordering[EdgeWithLocalIds[ED]] =
+ new Ordering[EdgeWithLocalIds[ED]] {
+ override def compare(a: EdgeWithLocalIds[ED], b: EdgeWithLocalIds[ED]): Int = {
+ if (a.srcId == b.srcId) {
+ if (a.dstId == b.dstId) 0
+ else if (a.dstId < b.dstId) -1
+ else 1
+ } else if (a.srcId < b.srcId) -1
else 1
- } else if (a.srcId < b.srcId) -1
- else 1
+ }
}
- }
- private[graphx] def edgeArraySortDataFormat[ED]
- = new SortDataFormat[EdgeWithLocalIds[ED], Array[EdgeWithLocalIds[ED]]] {
- override def getKey(
- data: Array[EdgeWithLocalIds[ED]], pos: Int): EdgeWithLocalIds[ED] = {
- data(pos)
- }
+ private[graphx] def edgeArraySortDataFormat[ED] = {
+ new SortDataFormat[EdgeWithLocalIds[ED], Array[EdgeWithLocalIds[ED]]] {
+ override def getKey(data: Array[EdgeWithLocalIds[ED]], pos: Int): EdgeWithLocalIds[ED] = {
+ data(pos)
+ }
- override def swap(data: Array[EdgeWithLocalIds[ED]], pos0: Int, pos1: Int): Unit = {
- val tmp = data(pos0)
- data(pos0) = data(pos1)
- data(pos1) = tmp
- }
+ override def swap(data: Array[EdgeWithLocalIds[ED]], pos0: Int, pos1: Int): Unit = {
+ val tmp = data(pos0)
+ data(pos0) = data(pos1)
+ data(pos1) = tmp
+ }
- override def copyElement(
- src: Array[EdgeWithLocalIds[ED]], srcPos: Int,
- dst: Array[EdgeWithLocalIds[ED]], dstPos: Int) {
- dst(dstPos) = src(srcPos)
- }
+ override def copyElement(
+ src: Array[EdgeWithLocalIds[ED]], srcPos: Int,
+ dst: Array[EdgeWithLocalIds[ED]], dstPos: Int) {
+ dst(dstPos) = src(srcPos)
+ }
- override def copyRange(
- src: Array[EdgeWithLocalIds[ED]], srcPos: Int,
- dst: Array[EdgeWithLocalIds[ED]], dstPos: Int, length: Int) {
- System.arraycopy(src, srcPos, dst, dstPos, length)
- }
+ override def copyRange(
+ src: Array[EdgeWithLocalIds[ED]], srcPos: Int,
+ dst: Array[EdgeWithLocalIds[ED]], dstPos: Int, length: Int) {
+ System.arraycopy(src, srcPos, dst, dstPos, length)
+ }
- override def allocate(length: Int): Array[EdgeWithLocalIds[ED]] = {
- new Array[EdgeWithLocalIds[ED]](length)
+ override def allocate(length: Int): Array[EdgeWithLocalIds[ED]] = {
+ new Array[EdgeWithLocalIds[ED]](length)
+ }
}
}
}
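With lexicographicOrdering now explicitly typed as Ordering[EdgeWithLocalIds[ED]], implicit resolution at sort sites is unchanged. A sketch of the kind of use it serves (this only typechecks inside org.apache.spark.graphx.impl, since EdgeWithLocalIds is private[impl]; the edge values are made up):

    import scala.util.Sorting

    // Sketch: edges sort by (srcId, dstId) under the implicit Ordering.
    val edges = Array(
      EdgeWithLocalIds(2L, 1L, 0, 1, "b"),
      EdgeWithLocalIds(1L, 3L, 1, 2, "a"))
    Sorting.quickSort(edges)(EdgeWithLocalIds.lexicographicOrdering[String])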
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala b/graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala
index 5412d72047..aa320088f2 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala
@@ -74,8 +74,8 @@ object ShippableVertexPartition {
* Implicit conversion to allow invoking `VertexPartitionBase` operations directly on a
* `ShippableVertexPartition`.
*/
- implicit def shippablePartitionToOps[VD: ClassTag](partition: ShippableVertexPartition[VD]) =
- new ShippableVertexPartitionOps(partition)
+ implicit def shippablePartitionToOps[VD: ClassTag](partition: ShippableVertexPartition[VD])
+ : ShippableVertexPartitionOps[VD] = new ShippableVertexPartitionOps(partition)
/**
* Implicit evidence that `ShippableVertexPartition` is a member of the
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/impl/VertexPartition.scala b/graphx/src/main/scala/org/apache/spark/graphx/impl/VertexPartition.scala
index 55c7a19d1b..fbe53acfc3 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/impl/VertexPartition.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/impl/VertexPartition.scala
@@ -38,8 +38,8 @@ private[graphx] object VertexPartition {
* Implicit conversion to allow invoking `VertexPartitionBase` operations directly on a
* `VertexPartition`.
*/
- implicit def partitionToOps[VD: ClassTag](partition: VertexPartition[VD]) =
- new VertexPartitionOps(partition)
+ implicit def partitionToOps[VD: ClassTag](partition: VertexPartition[VD])
+ : VertexPartitionOps[VD] = new VertexPartitionOps(partition)
/**
* Implicit evidence that `VertexPartition` is a member of the `VertexPartitionBaseOpsConstructor`
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/impl/VertexPartitionBaseOps.scala b/graphx/src/main/scala/org/apache/spark/graphx/impl/VertexPartitionBaseOps.scala
index b40aa1b417..4fd2548b7f 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/impl/VertexPartitionBaseOps.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/impl/VertexPartitionBaseOps.scala
@@ -238,8 +238,8 @@ private[graphx] abstract class VertexPartitionBaseOps
* because these methods return a `Self` and this implicit conversion re-wraps that in a
* `VertexPartitionBaseOps`. This relies on the context bound on `Self`.
*/
- private implicit def toOps[VD2: ClassTag](
- partition: Self[VD2]): VertexPartitionBaseOps[VD2, Self] = {
+ private implicit def toOps[VD2: ClassTag](partition: Self[VD2])
+ : VertexPartitionBaseOps[VD2, Self] = {
implicitly[VertexPartitionBaseOpsConstructor[Self]].toOps(partition)
}
}
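In isolation, the shape of toOps — an implicit conversion that delegates to a type-class instance supplied by the context bound on Self — looks like this (a sketch with made-up names, not graphx code):

    import scala.language.{higherKinds, implicitConversions}

    // Sketch of the pattern: the Ops wrapper is produced by a type-class
    // instance, so the conversion's return type must be written out
    // rather than inferred through implicitly[...].
    abstract class Ops[A, C[_]]
    trait OpsConstructor[C[_]] { def toOps[A](c: C[A]): Ops[A, C] }

    object Pattern {
      implicit def toOps[C[_], A](c: C[A])(implicit ev: OpsConstructor[C]): Ops[A, C] =
        ev.toOps(c)
    }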