path: root/core/src/main/scala/spark/rdd/SampledRDD.scala
blob: 6e4797aabbec436c90b2faea9d8078c0e90f3ce6
package spark.rdd

import java.util.Random
import cern.jet.random.Poisson
import cern.jet.random.engine.DRand

import spark.{OneToOneDependency, RDD, Split, TaskContext}


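/**
 * A split of a SampledRDD, pairing the corresponding parent split with the random seed
 * to use when sampling that partition.
 */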
private[spark]
class SampledRDDSplit(val prev: Split, val seed: Int) extends Split with Serializable {
  override val index: Int = prev.index
}

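/**
 * An RDD representing a sampled subset of its parent. Sampling is done either with
 * replacement, by drawing a Poisson(frac) count for each element, or without replacement,
 * by an independent Bernoulli trial with probability frac for each element. Each partition
 * gets its own seed, derived deterministically from the top-level seed.
 */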
class SampledRDD[T: ClassManifest](
    prev: RDD[T],
    withReplacement: Boolean,
    frac: Double,
    seed: Int)
  extends RDD[T](prev.context) {

  @transient
  val splits_ = {
    val rg = new Random(seed)
    prev.splits.map(x => new SampledRDDSplit(x, rg.nextInt))
  }

  override def splits = splits_.asInstanceOf[Array[Split]]

  override val dependencies = List(new OneToOneDependency(prev))

  override def preferredLocations(split: Split) =
    prev.preferredLocations(split.asInstanceOf[SampledRDDSplit].prev)

  override def compute(splitIn: Split, context: TaskContext) = {
    val split = splitIn.asInstanceOf[SampledRDDSplit]
    if (withReplacement) {
      // For large datasets, the number of times each of the n elements appears in a sample
      // of size frac * n drawn with replacement is Binomial(frac * n, 1/n), which is
      // approximately Poisson(frac). We therefore draw a Poisson count for each element.
      val poisson = new Poisson(frac, new DRand(split.seed))
      prev.iterator(split.prev, context).flatMap { element =>
        val count = poisson.nextInt()
        if (count == 0) {
          Iterator.empty  // Avoid object allocation when we return 0 items, which happens quite often
        } else {
          Iterator.fill(count)(element)
        }
      }
    } else { // Sampling without replacement
      val rand = new Random(split.seed)
      prev.iterator(split.prev, context).filter(x => rand.nextDouble < frac)
    }
  }
}
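
// ---------------------------------------------------------------------------
// Usage note: a SampledRDD is normally obtained through the parent RDD's
// sample(withReplacement, frac, seed) method rather than constructed directly.
//
// The sketch below illustrates the Poisson trick from compute() on a plain
// Scala collection, assuming only the JDK: Knuth's inversion algorithm stands
// in for Colt's Poisson generator. The object name PoissonSampleSketch and the
// sample parameters are made up for illustration.
private[spark] object PoissonSampleSketch {
  // Draw a Poisson(lambda)-distributed count via Knuth's inversion algorithm.
  def poissonCount(lambda: Double, rand: Random): Int = {
    val limit = math.exp(-lambda)
    var count = -1
    var product = 1.0
    do {
      count += 1
      product *= rand.nextDouble()
    } while (product > limit)
    count
  }

  def main(args: Array[String]) {
    val rand = new Random(42)
    val data = 1 to 1000
    val frac = 0.1
    // Each element appears Poisson(frac) times, so the sample holds about
    // frac * data.size elements in expectation.
    val sample = data.flatMap(x => Seq.fill(poissonCount(frac, rand))(x))
    println("sampled " + sample.size + " of " + data.size + " elements")
  }
}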