path: root/core/src/main/scala/spark/api/python/PythonPartitioner.scala
package spark.api.python

import spark.Partitioner

import java.util.Arrays

/**
 * A [[spark.Partitioner]] that hashes byte-array keys by content, for use by the Python API.
 *
 * Stores the unique id() of the Python-side partitioning function so that it is incorporated into
 * equality comparisons.  Correctness requires that the id is a unique identifier for the
 * lifetime of the program (i.e. that it is not re-used as the id of a different partitioning
 * function).  This can be ensured by using the Python id() function and maintaining a reference
 * to the Python partitioning function so that its id() is not reused.
 */
private[spark] class PythonPartitioner(
  override val numPartitions: Int,
  val pyPartitionFunctionId: Long)
  extends Partitioner {

  override def getPartition(key: Any): Int = {
    if (key == null) {
      0
    } else {
      // Hash byte arrays by content; all other keys use their own hashCode().
      val hashCode = key match {
        case bytes: Array[Byte] => Arrays.hashCode(bytes)
        case _ => key.hashCode()
      }
      // Map into [0, numPartitions) to guard against negative hash codes.
      val mod = hashCode % numPartitions
      if (mod < 0) mod + numPartitions else mod
    }
  }

  override def equals(other: Any): Boolean = other match {
    case h: PythonPartitioner =>
      h.numPartitions == numPartitions && h.pyPartitionFunctionId == pyPartitionFunctionId
    case _ =>
      false
  }

  // Keep hashCode consistent with equals: equal partitioners must produce equal hash codes.
  override def hashCode: Int = 31 * numPartitions + pyPartitionFunctionId.hashCode
}
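
// Usage sketch (illustrative only, not part of the original file): a minimal example of the
// behavior described in the scaladoc above, assuming a hypothetical Python-side function id of
// 42L obtained via id(). Two partitioners built from the same id and partition count compare
// equal (so Spark can skip an unnecessary shuffle), byte-array keys are hashed by content rather
// than by reference, and null keys always land in partition 0.
object PythonPartitionerExample {
  def main(args: Array[String]): Unit = {
    val a = new PythonPartitioner(numPartitions = 4, pyPartitionFunctionId = 42L)
    val b = new PythonPartitioner(numPartitions = 4, pyPartitionFunctionId = 42L)
    assert(a == b)  // same function id and partition count => equal partitioners

    val key = "spark".getBytes("UTF-8")
    assert(a.getPartition(key) == b.getPartition(key))  // content-based byte-array hashing
    assert(a.getPartition(null) == 0)                    // null keys go to partition 0
  }
}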