Diffstat (limited to 'mllib/src/main/scala')
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/Estimator.scala    38
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/Transformer.scala  24
2 files changed, 0 insertions, 62 deletions
diff --git a/mllib/src/main/scala/org/apache/spark/ml/Estimator.scala b/mllib/src/main/scala/org/apache/spark/ml/Estimator.scala
index fdbee743e8..77d230eb4a 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/Estimator.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/Estimator.scala
@@ -18,12 +18,10 @@
package org.apache.spark.ml
import scala.annotation.varargs
-import scala.collection.JavaConverters._
import org.apache.spark.annotation.AlphaComponent
import org.apache.spark.ml.param.{ParamMap, ParamPair, Params}
import org.apache.spark.sql.SchemaRDD
-import org.apache.spark.sql.api.java.JavaSchemaRDD
/**
* :: AlphaComponent ::
@@ -66,40 +64,4 @@ abstract class Estimator[M <: Model[M]] extends PipelineStage with Params {
def fit(dataset: SchemaRDD, paramMaps: Array[ParamMap]): Seq[M] = {
paramMaps.map(fit(dataset, _))
}
-
- // Java-friendly versions of fit.
-
- /**
- * Fits a single model to the input data with optional parameters.
- *
- * @param dataset input dataset
- * @param paramPairs optional list of param pairs (overwrite embedded params)
- * @return fitted model
- */
- @varargs
- def fit(dataset: JavaSchemaRDD, paramPairs: ParamPair[_]*): M = {
- fit(dataset.schemaRDD, paramPairs: _*)
- }
-
- /**
- * Fits a single model to the input data with provided parameter map.
- *
- * @param dataset input dataset
- * @param paramMap parameter map
- * @return fitted model
- */
- def fit(dataset: JavaSchemaRDD, paramMap: ParamMap): M = {
- fit(dataset.schemaRDD, paramMap)
- }
-
- /**
- * Fits multiple models to the input data with multiple sets of parameters.
- *
- * @param dataset input dataset
- * @param paramMaps an array of parameter maps
- * @return fitted models, matching the input parameter maps
- */
- def fit(dataset: JavaSchemaRDD, paramMaps: Array[ParamMap]): java.util.List[M] = {
- fit(dataset.schemaRDD, paramMaps).asJava
- }
}
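With the JavaSchemaRDD overloads removed, Java callers go through the same SchemaRDD-based fit methods as Scala code. A minimal sketch of the three surviving entry points, assuming the contemporary spark.ml LogisticRegression and a `training: SchemaRDD` already in scope (these names are illustrative, not part of this diff):

```scala
// Sketch only: LogisticRegression, maxIter, regParam, and `training`
// are assumptions from the spark.ml API of this era, not part of the diff.
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.param.ParamMap

val lr = new LogisticRegression()

// Varargs overload: inline param pairs overwrite the embedded params.
val model1 = lr.fit(training, lr.maxIter -> 10, lr.regParam -> 0.01)

// Single-ParamMap overload.
val paramMap = ParamMap(lr.maxIter -> 20).put(lr.regParam -> 0.1)
val model2 = lr.fit(training, paramMap)

// Multi-map overload: one fitted model per ParamMap, in input order.
val models = lr.fit(training, Array(paramMap, ParamMap.empty))
```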
diff --git a/mllib/src/main/scala/org/apache/spark/ml/Transformer.scala b/mllib/src/main/scala/org/apache/spark/ml/Transformer.scala
index 1331b91240..af56f9c435 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/Transformer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/Transformer.scala
@@ -23,7 +23,6 @@ import org.apache.spark.Logging
import org.apache.spark.annotation.AlphaComponent
import org.apache.spark.ml.param._
import org.apache.spark.sql.SchemaRDD
-import org.apache.spark.sql.api.java.JavaSchemaRDD
import org.apache.spark.sql.catalyst.analysis.Star
import org.apache.spark.sql.catalyst.expressions.ScalaUdf
import org.apache.spark.sql.types._
@@ -55,29 +54,6 @@ abstract class Transformer extends PipelineStage with Params {
* @return transformed dataset
*/
def transform(dataset: SchemaRDD, paramMap: ParamMap): SchemaRDD
-
- // Java-friendly versions of transform.
-
- /**
- * Transforms the dataset with optional parameters.
- * @param dataset input dataset
- * @param paramPairs optional list of param pairs, overwrite embedded params
- * @return transformed dataset
- */
- @varargs
- def transform(dataset: JavaSchemaRDD, paramPairs: ParamPair[_]*): JavaSchemaRDD = {
- transform(dataset.schemaRDD, paramPairs: _*).toJavaSchemaRDD
- }
-
- /**
- * Transforms the dataset with provided parameter map as additional parameters.
- * @param dataset input dataset
- * @param paramMap additional parameters, overwrite embedded params
- * @return transformed dataset
- */
- def transform(dataset: JavaSchemaRDD, paramMap: ParamMap): JavaSchemaRDD = {
- transform(dataset.schemaRDD, paramMap).toJavaSchemaRDD
- }
}
/**
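Transformer keeps the same shape: only the SchemaRDD-based transform overloads remain, and Java callers use them directly. Continuing the sketch above (still illustrative, not part of this diff):

```scala
// Sketch only: `model` (e.g. the LogisticRegressionModel fitted above) and
// `test: SchemaRDD` are assumed; `threshold` is that model's param in the
// spark.ml API of this era.
val predictions = model.transform(test)                   // varargs overload, no extra params
val inline = model.transform(test, model.threshold -> 0.6) // inline param pair overwrites embedded param
val mapped = model.transform(test, ParamMap(model.threshold -> 0.6)) // ParamMap overload
```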