author     Reynold Xin <rxin@databricks.com>        2016-01-15 12:03:28 -0800
committer  Josh Rosen <joshrosen@databricks.com>    2016-01-15 12:03:28 -0800
commit     ad1503f92e1f6e960a24f9f5d36b1735d1f5073a (patch)
tree       564c1b3b8bea707d98d9467a7bbba87845051908 /examples
parent     5f83c6991c95616ecbc2878f8860c69b2826f56c (diff)
[SPARK-12667] Remove block manager's internal "external block store" API
This pull request removes the external block store API. The API is rarely used, and the file system interface is a better, more standard way to interact with external storage systems. There are some other things to remove as well, as pointed out by JoshRosen; we will do those in follow-up pull requests.

Author: Reynold Xin <rxin@databricks.com>

Closes #10752 from rxin/remove-offheap.
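For context, the "file system interface" alternative mentioned above amounts to exchanging data with external storage through ordinary Hadoop-compatible paths rather than through a dedicated block-store API. A minimal sketch, assuming a hypothetical tachyon://host:19998 endpoint (any HDFS-compatible URI behaves the same way):

import org.apache.spark.{SparkConf, SparkContext}

object ExternalFsSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("ExternalFsSketch"))
    val data = sc.parallelize(1 to 1000)
    // Write to the external system through the standard file system interface;
    // the tachyon:// URI is a placeholder, not an API this commit introduces.
    data.saveAsTextFile("tachyon://host:19998/tmp/sketch-output")
    // Read the data back through the same interface.
    val restored = sc.textFile("tachyon://host:19998/tmp/sketch-output")
    println(restored.count())
    sc.stop()
  }
}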
Diffstat (limited to 'examples')
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala  93
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/SparkTachyonPi.scala      50
2 files changed, 0 insertions, 143 deletions
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala
deleted file mode 100644
index 8b739c9d7c..0000000000
--- a/examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// scalastyle:off println
-package org.apache.spark.examples
-
-import java.util.Random
-
-import scala.math.exp
-
-import breeze.linalg.{DenseVector, Vector}
-import org.apache.hadoop.conf.Configuration
-
-import org.apache.spark._
-import org.apache.spark.storage.StorageLevel
-
-/**
- * Logistic regression-based classification.
- * This example uses Tachyon to persist RDDs during computation.
- *
- * This is an example implementation for learning how to use Spark. For more conventional use,
- * please refer to either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
- */
-object SparkTachyonHdfsLR {
- val D = 10 // Number of dimensions
- val rand = new Random(42)
-
- def showWarning() {
- System.err.println(
- """WARN: This is a naive implementation of Logistic Regression and is given as an example!
- |Please use either org.apache.spark.mllib.classification.LogisticRegressionWithSGD or
- |org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
- |for more conventional use.
- """.stripMargin)
- }
-
- case class DataPoint(x: Vector[Double], y: Double)
-
- def parsePoint(line: String): DataPoint = {
- val tok = new java.util.StringTokenizer(line, " ")
- val y = tok.nextToken.toDouble
- val x = new Array[Double](D)
- var i = 0
- while (i < D) {
- x(i) = tok.nextToken.toDouble; i += 1
- }
- DataPoint(new DenseVector(x), y)
- }
-
- def main(args: Array[String]) {
-
- showWarning()
-
- val inputPath = args(0)
- val sparkConf = new SparkConf().setAppName("SparkTachyonHdfsLR")
- val conf = new Configuration()
- val sc = new SparkContext(sparkConf)
- val lines = sc.textFile(inputPath)
- val points = lines.map(parsePoint).persist(StorageLevel.OFF_HEAP)
- val ITERATIONS = args(1).toInt
-
- // Initialize w to a random value
- var w = DenseVector.fill(D){2 * rand.nextDouble - 1}
- println("Initial w: " + w)
-
- for (i <- 1 to ITERATIONS) {
- println("On iteration " + i)
- val gradient = points.map { p =>
- p.x * (1 / (1 + exp(-p.y * (w.dot(p.x)))) - 1) * p.y
- }.reduce(_ + _)
- w -= gradient
- }
-
- println("Final w: " + w)
- sc.stop()
- }
-}
-// scalastyle:on println
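With the external block store gone, the caching step in the removed example above no longer has a Tachyon-backed target. A minimal sketch of the equivalent persist call using a standard storage level (MEMORY_AND_DISK is an assumption here, not something this commit prescribes):

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.storage.StorageLevel

object HdfsLRCachingSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("HdfsLRCachingSketch"))
    // Same input pattern as the removed example, but cached with an
    // on-heap/disk storage level instead of the Tachyon-backed OFF_HEAP.
    val lines = sc.textFile(args(0)).persist(StorageLevel.MEMORY_AND_DISK)
    println(lines.count())
    sc.stop()
  }
}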
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkTachyonPi.scala b/examples/src/main/scala/org/apache/spark/examples/SparkTachyonPi.scala
deleted file mode 100644
index e46ac655be..0000000000
--- a/examples/src/main/scala/org/apache/spark/examples/SparkTachyonPi.scala
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// scalastyle:off println
-package org.apache.spark.examples
-
-import scala.math.random
-
-import org.apache.spark._
-import org.apache.spark.storage.StorageLevel
-
-/**
- * Computes an approximation to pi.
- * This example uses Tachyon to persist RDDs during computation.
- */
-object SparkTachyonPi {
- def main(args: Array[String]) {
- val sparkConf = new SparkConf().setAppName("SparkTachyonPi")
- val spark = new SparkContext(sparkConf)
-
- val slices = if (args.length > 0) args(0).toInt else 2
- val n = 100000 * slices
-
- val rdd = spark.parallelize(1 to n, slices)
- rdd.persist(StorageLevel.OFF_HEAP)
- val count = rdd.map { i =>
- val x = random * 2 - 1
- val y = random * 2 - 1
- if (x * x + y * y < 1) 1 else 0
- }.reduce(_ + _)
- println("Pi is roughly " + 4.0 * count / n)
-
- spark.stop()
- }
-}
-// scalastyle:on println
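The estimator in the removed file is independent of the storage layer: points drawn uniformly from [-1, 1] x [-1, 1] land inside the unit circle with probability pi/4, which is where the 4.0 * count / n expression comes from. A standalone sketch of the same Monte Carlo estimate with no persistence at all (the sample count n is an arbitrary choice for this sketch):

import scala.math.random

object LocalPiSketch {
  def main(args: Array[String]): Unit = {
    val n = 1000000
    var inside = 0
    var i = 0
    while (i < n) {
      // Draw a point uniformly from the square [-1, 1] x [-1, 1].
      val x = random * 2 - 1
      val y = random * 2 - 1
      if (x * x + y * y < 1) inside += 1
      i += 1
    }
    // The hit fraction approximates pi / 4.
    println("Pi is roughly " + 4.0 * inside / n)
  }
}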