path: root/mllib/src/main/scala/spark/mllib/regression/RidgeRegressionGenerator.scala
package spark.mllib.regression

import scala.util.Random

import org.jblas.DoubleMatrix

import spark.{RDD, SparkContext}
import spark.mllib.util.MLUtils


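/**
 * Generate sample data for ridge regression: features are drawn uniformly at random,
 * and each label is a linear combination of the features (the first two weights are set
 * to 10, the remaining weights lie in [-0.5, 0.5]) plus Gaussian noise scaled by `eps`.
 * The generated (label, features) pairs are written to `outputPath` via `MLUtils.saveData`.
 *
 * Usage: RidgeRegressionGenerator <master> <output_dir> <num_examples> <num_features> <num_partitions>
 */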
object RidgeRegressionGenerator {

  def main(args: Array[String]) {
    if (args.length != 5) {
      println("Usage: RidgeRegressionGenerator " +
        "<master> <output_dir> <num_examples> <num_features> <num_partitions>")
      System.exit(1)
    }

    val sparkMaster: String = args(0)
    val outputPath: String = args(1)
    val nexamples: Int = args(2).toInt
    val nfeatures: Int = args(3).toInt
    val parts: Int = args(4).toInt
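    // Scale (standard deviation) of the Gaussian noise added to each label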
    val eps = 10

    org.jblas.util.Random.seed(42)
    val sc = new SparkContext(sparkMaster, "RidgeRegressionGenerator")

    // Random values distributed uniformly in [-0.5, 0.5]
    val w = DoubleMatrix.rand(nfeatures, 1).subi(0.5)
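    // Give the first two features large fixed weights so they dominate the label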
    w.put(0, 0, 10)
    w.put(1, 0, 10)

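    // Generate the examples across `parts` partitions; each partition seeds its own
    // feature matrix and noise from the partition index.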
    val data: RDD[(Double, Array[Double])] = sc.parallelize(0 until parts, parts).flatMap { p =>
      org.jblas.util.Random.seed(42 + p)
      val examplesInPartition = nexamples / parts

      val X = DoubleMatrix.rand(examplesInPartition, nfeatures)
      val y = X.mmul(w)

      val rnd = new Random(42 + p)

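      // Zero-mean Gaussian noise with standard deviation eps, added to the clean labels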
      val normalValues = Array.fill[Double](examplesInPartition)(rnd.nextGaussian() * eps)
      val yObs = new DoubleMatrix(normalValues).addi(y)

      Iterator.tabulate(examplesInPartition) { i =>
        (yObs.get(i, 0), X.getRow(i).toArray)
      }
    }

    MLUtils.saveData(data, outputPath)
    sc.stop()
  }
}