Diffstat (limited to 'examples/src/main/scala/org/apache/spark/examples/ml/MinHashLSHExample.scala')
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/ml/MinHashLSHExample.scala  77
1 file changed, 77 insertions, 0 deletions
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/MinHashLSHExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/MinHashLSHExample.scala
new file mode 100644
index 0000000000..f4fc3cf411
--- /dev/null
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/MinHashLSHExample.scala
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// scalastyle:off println
+package org.apache.spark.examples.ml
+
+// $example on$
+import org.apache.spark.ml.feature.MinHashLSH
+import org.apache.spark.ml.linalg.Vectors
+// $example off$
+import org.apache.spark.sql.SparkSession
+
+object MinHashLSHExample {
+ def main(args: Array[String]): Unit = {
+ // Creates a SparkSession
+ val spark = SparkSession
+ .builder
+ .appName("MinHashLSHExample")
+ .getOrCreate()
+
+ // $example on$
+ val dfA = spark.createDataFrame(Seq(
+ (0, Vectors.sparse(6, Seq((0, 1.0), (1, 1.0), (2, 1.0)))),
+ (1, Vectors.sparse(6, Seq((2, 1.0), (3, 1.0), (4, 1.0)))),
+ (2, Vectors.sparse(6, Seq((0, 1.0), (2, 1.0), (4, 1.0))))
+ )).toDF("id", "keys")
+
+ val dfB = spark.createDataFrame(Seq(
+ (3, Vectors.sparse(6, Seq((1, 1.0), (3, 1.0), (5, 1.0)))),
+ (4, Vectors.sparse(6, Seq((2, 1.0), (3, 1.0), (5, 1.0)))),
+ (5, Vectors.sparse(6, Seq((1, 1.0), (2, 1.0), (4, 1.0))))
+ )).toDF("id", "keys")
+
+ val key = Vectors.sparse(6, Seq((1, 1.0), (3, 1.0)))
+
+ val mh = new MinHashLSH()
+ .setNumHashTables(3)
+ .setInputCol("keys")
+ .setOutputCol("values")
+
+ val model = mh.fit(dfA)
+
+    // Feature transformation: add a column of MinHash values for each row of dfA
+ model.transform(dfA).show()
+    // Cache the transformed DataFrames so the joins and searches below can reuse them
+ val transformedA = model.transform(dfA).cache()
+ val transformedB = model.transform(dfB).cache()
+
+    // Approximate similarity join: emit pairs with Jaccard distance below the 0.6 threshold
+ model.approxSimilarityJoin(dfA, dfB, 0.6).show()
+ model.approxSimilarityJoin(transformedA, transformedB, 0.6).show()
+    // Self join; the filter on id drops self-pairs and mirrored duplicates
+ model.approxSimilarityJoin(dfA, dfA, 0.6).filter("datasetA.id < datasetB.id").show()
+
+    // Approximate nearest neighbor search: the 2 rows of dfA closest to `key`
+ model.approxNearestNeighbors(dfA, key, 2).show()
+ model.approxNearestNeighbors(transformedA, key, 2).show()
+ // $example off$
+
+ spark.stop()
+ }
+}
+// scalastyle:on println
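
As a side note, the 0.6 threshold passed to approxSimilarityJoin above is a Jaccard distance over the sets of non-zero indices of the sparse vectors. A minimal, Spark-independent sketch of that arithmetic, using the active indices of dfA's row 0 and dfB's row 5 from the example (the object and value names here are illustrative, not part of the patch):

    // Jaccard distance between two index sets: 1 - |A intersect B| / |A union B|.
    object JaccardSketch {
      def jaccardDistance(a: Set[Int], b: Set[Int]): Double =
        1.0 - a.intersect(b).size.toDouble / a.union(b).size.toDouble

      def main(args: Array[String]): Unit = {
        val a0 = Set(0, 1, 2) // non-zero indices of dfA row id = 0
        val b5 = Set(1, 2, 4) // non-zero indices of dfB row id = 5
        // 1 - 2/4 = 0.5, which is below the 0.6 join threshold used above.
        println(jaccardDistance(a0, b5))
      }
    }

Once built into the Spark examples jar, the example itself can be run from a Spark distribution with ./bin/run-example ml.MinHashLSHExample.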