about summary refs log tree commit diff
path: root/examples
diff options
context:
space:
mode:
authorJoseph K. Bradley <joseph.kurata.bradley@gmail.com>2014-10-01 01:03:24 -0700
committerXiangrui Meng <meng@databricks.com>2014-10-01 01:03:24 -0700
commit7bf6cc9701cbb0f77fb85a412e387fb92274fca5 (patch)
tree21d38a426534826700f9f94b8f8d81034f55ea9b /examples
parenteb43043f411b87b7b412ee31e858246bd93fdd04 (diff)
downloadspark-7bf6cc9701cbb0f77fb85a412e387fb92274fca5.tar.gz
spark-7bf6cc9701cbb0f77fb85a412e387fb92274fca5.tar.bz2
spark-7bf6cc9701cbb0f77fb85a412e387fb92274fca5.zip
[SPARK-3751] [mllib] DecisionTree: example update + print options
DecisionTreeRunner functionality additions: * Allow user to pass in a test dataset * Do not print full model if the model is too large. As part of this, modify DecisionTreeModel and RandomForestModel to allow printing less info. Proposed updates: * toString: prints model summary * toDebugString: prints full model (named after RDD.toDebugString) Similar update to Python API: * __repr__() now prints a model summary * toDebugString() now prints the full model CC: mengxr chouqin manishamde codedeft Small update (whomever can take a look). Thanks! Author: Joseph K. Bradley <joseph.kurata.bradley@gmail.com> Closes #2604 from jkbradley/dtrunner-update and squashes the following commits: b2b3c60 [Joseph K. Bradley] re-added python sql doc test, temporarily removed before 07b1fae [Joseph K. Bradley] repr() now prints a model summary toDebugString() now prints the full model 1d0d93d [Joseph K. Bradley] Updated DT and RF to print less when toString is called. Added toDebugString for verbose printing. 22eac8c [Joseph K. Bradley] Merge remote-tracking branch 'upstream/master' into dtrunner-update e007a95 [Joseph K. Bradley] Updated DecisionTreeRunner to accept a test dataset.
Diffstat (limited to 'examples')
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala99
1 file changed, 72 insertions, 27 deletions
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
index 96fb068e9e..4adc91d2fb 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
@@ -52,6 +52,7 @@ object DecisionTreeRunner {
case class Params(
input: String = null,
+ testInput: String = "",
dataFormat: String = "libsvm",
algo: Algo = Classification,
maxDepth: Int = 5,
@@ -98,13 +99,18 @@ object DecisionTreeRunner {
s"default: ${defaultParams.featureSubsetStrategy}")
.action((x, c) => c.copy(featureSubsetStrategy = x))
opt[Double]("fracTest")
- .text(s"fraction of data to hold out for testing, default: ${defaultParams.fracTest}")
+ .text(s"fraction of data to hold out for testing. If given option testInput, " +
+ s"this option is ignored. default: ${defaultParams.fracTest}")
.action((x, c) => c.copy(fracTest = x))
+ opt[String]("testInput")
+ .text(s"input path to test dataset. If given, option fracTest is ignored." +
+ s" default: ${defaultParams.testInput}")
+ .action((x, c) => c.copy(testInput = x))
opt[String]("<dataFormat>")
.text("data format: libsvm (default), dense (deprecated in Spark v1.1)")
.action((x, c) => c.copy(dataFormat = x))
arg[String]("<input>")
- .text("input paths to labeled examples in dense format (label,f0 f1 f2 ...)")
+ .text("input path to labeled examples")
.required()
.action((x, c) => c.copy(input = x))
checkConfig { params =>
@@ -141,7 +147,7 @@ object DecisionTreeRunner {
case "libsvm" => MLUtils.loadLibSVMFile(sc, params.input).cache()
}
// For classification, re-index classes if needed.
- val (examples, numClasses) = params.algo match {
+ val (examples, classIndexMap, numClasses) = params.algo match {
case Classification => {
// classCounts: class --> # examples in class
val classCounts = origExamples.map(_.label).countByValue()
@@ -170,16 +176,40 @@ object DecisionTreeRunner {
val frac = classCounts(c) / numExamples.toDouble
println(s"$c\t$frac\t${classCounts(c)}")
}
- (examples, numClasses)
+ (examples, classIndexMap, numClasses)
}
case Regression =>
- (origExamples, 0)
+ (origExamples, null, 0)
case _ =>
throw new IllegalArgumentException("Algo ${params.algo} not supported.")
}
- // Split into training, test.
- val splits = examples.randomSplit(Array(1.0 - params.fracTest, params.fracTest))
+ // Create training, test sets.
+ val splits = if (params.testInput != "") {
+ // Load testInput.
+ val origTestExamples = params.dataFormat match {
+ case "dense" => MLUtils.loadLabeledPoints(sc, params.testInput)
+ case "libsvm" => MLUtils.loadLibSVMFile(sc, params.testInput)
+ }
+ params.algo match {
+ case Classification => {
+ // classCounts: class --> # examples in class
+ val testExamples = {
+ if (classIndexMap.isEmpty) {
+ origTestExamples
+ } else {
+ origTestExamples.map(lp => LabeledPoint(classIndexMap(lp.label), lp.features))
+ }
+ }
+ Array(examples, testExamples)
+ }
+ case Regression =>
+ Array(examples, origTestExamples)
+ }
+ } else {
+ // Split input into training, test.
+ examples.randomSplit(Array(1.0 - params.fracTest, params.fracTest))
+ }
val training = splits(0).cache()
val test = splits(1).cache()
val numTraining = training.count()
@@ -206,32 +236,56 @@ object DecisionTreeRunner {
minInfoGain = params.minInfoGain)
if (params.numTrees == 1) {
val model = DecisionTree.train(training, strategy)
- println(model)
+ if (model.numNodes < 20) {
+ println(model.toDebugString) // Print full model.
+ } else {
+ println(model) // Print model summary.
+ }
if (params.algo == Classification) {
- val accuracy =
+ val trainAccuracy =
+ new MulticlassMetrics(training.map(lp => (model.predict(lp.features), lp.label)))
+ .precision
+ println(s"Train accuracy = $trainAccuracy")
+ val testAccuracy =
new MulticlassMetrics(test.map(lp => (model.predict(lp.features), lp.label))).precision
- println(s"Test accuracy = $accuracy")
+ println(s"Test accuracy = $testAccuracy")
}
if (params.algo == Regression) {
- val mse = meanSquaredError(model, test)
- println(s"Test mean squared error = $mse")
+ val trainMSE = meanSquaredError(model, training)
+ println(s"Train mean squared error = $trainMSE")
+ val testMSE = meanSquaredError(model, test)
+ println(s"Test mean squared error = $testMSE")
}
} else {
val randomSeed = Utils.random.nextInt()
if (params.algo == Classification) {
val model = RandomForest.trainClassifier(training, strategy, params.numTrees,
params.featureSubsetStrategy, randomSeed)
- println(model)
- val accuracy =
+ if (model.totalNumNodes < 30) {
+ println(model.toDebugString) // Print full model.
+ } else {
+ println(model) // Print model summary.
+ }
+ val trainAccuracy =
+ new MulticlassMetrics(training.map(lp => (model.predict(lp.features), lp.label)))
+ .precision
+ println(s"Train accuracy = $trainAccuracy")
+ val testAccuracy =
new MulticlassMetrics(test.map(lp => (model.predict(lp.features), lp.label))).precision
- println(s"Test accuracy = $accuracy")
+ println(s"Test accuracy = $testAccuracy")
}
if (params.algo == Regression) {
val model = RandomForest.trainRegressor(training, strategy, params.numTrees,
params.featureSubsetStrategy, randomSeed)
- println(model)
- val mse = meanSquaredError(model, test)
- println(s"Test mean squared error = $mse")
+ if (model.totalNumNodes < 30) {
+ println(model.toDebugString) // Print full model.
+ } else {
+ println(model) // Print model summary.
+ }
+ val trainMSE = meanSquaredError(model, training)
+ println(s"Train mean squared error = $trainMSE")
+ val testMSE = meanSquaredError(model, test)
+ println(s"Test mean squared error = $testMSE")
}
}
@@ -239,15 +293,6 @@ object DecisionTreeRunner {
}
/**
- * Calculates the classifier accuracy.
- */
- private def accuracyScore(model: DecisionTreeModel, data: RDD[LabeledPoint]): Double = {
- val correctCount = data.filter(y => model.predict(y.features) == y.label).count()
- val count = data.count()
- correctCount.toDouble / count
- }
-
- /**
* Calculates the mean squared error for regression.
*/
private def meanSquaredError(tree: DecisionTreeModel, data: RDD[LabeledPoint]): Double = {