From 48851d4dd90184fdeee836caef5ad77b0dc643be Mon Sep 17 00:00:00 2001
From: Shivaram Venkataraman
Date: Tue, 30 Jul 2013 14:03:15 -0700
Subject: Add bagel, mllib to SBT assembly. Also add jblas dependency to mllib
 pom.xml

---
 mllib/pom.xml | 165 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 165 insertions(+)
 create mode 100644 mllib/pom.xml

diff --git a/mllib/pom.xml b/mllib/pom.xml
new file mode 100644
index 0000000000..f3928cc73d
--- /dev/null
+++ b/mllib/pom.xml
@@ -0,0 +1,165 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~    http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.spark-project</groupId>
+    <artifactId>spark-parent</artifactId>
+    <version>0.8.0-SNAPSHOT</version>
+    <relativePath>../pom.xml</relativePath>
+  </parent>
+
+  <groupId>org.spark-project</groupId>
+  <artifactId>spark-mllib</artifactId>
+  <packaging>jar</packaging>
+  <name>Spark Project ML Library</name>
+  <url>http://spark-project.org/</url>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-server</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.jblas</groupId>
+      <artifactId>jblas</artifactId>
+      <version>1.2.3</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.scalatest</groupId>
+      <artifactId>scalatest_${scala.version}</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.scalacheck</groupId>
+      <artifactId>scalacheck_${scala.version}</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+  <build>
+    <outputDirectory>target/scala-${scala.version}/classes</outputDirectory>
+    <testOutputDirectory>target/scala-${scala.version}/test-classes</testOutputDirectory>
+    <plugins>
+      <plugin>
+        <groupId>org.scalatest</groupId>
+        <artifactId>scalatest-maven-plugin</artifactId>
+      </plugin>
+    </plugins>
+  </build>
+
+  <profiles>
+    <profile>
+      <id>hadoop1</id>
+      <dependencies>
+        <dependency>
+          <groupId>org.spark-project</groupId>
+          <artifactId>spark-core</artifactId>
+          <version>${project.version}</version>
+          <classifier>hadoop1</classifier>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+          <scope>provided</scope>
+        </dependency>
+      </dependencies>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-jar-plugin</artifactId>
+            <configuration>
+              <classifier>hadoop1</classifier>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <profile>
+      <id>hadoop2</id>
+      <dependencies>
+        <dependency>
+          <groupId>org.spark-project</groupId>
+          <artifactId>spark-core</artifactId>
+          <version>${project.version}</version>
+          <classifier>hadoop2</classifier>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+          <scope>provided</scope>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client</artifactId>
+          <scope>provided</scope>
+        </dependency>
+      </dependencies>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-jar-plugin</artifactId>
+            <configuration>
+              <classifier>hadoop2</classifier>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <profile>
+      <id>hadoop2-yarn</id>
+      <dependencies>
+        <dependency>
+          <groupId>org.spark-project</groupId>
+          <artifactId>spark-core</artifactId>
+          <version>${project.version}</version>
+          <classifier>hadoop2-yarn</classifier>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client</artifactId>
+          <scope>provided</scope>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-api</artifactId>
+          <scope>provided</scope>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-common</artifactId>
+          <scope>provided</scope>
+        </dependency>
+      </dependencies>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-jar-plugin</artifactId>
+            <configuration>
+              <classifier>hadoop2-yarn</classifier>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>
-- cgit v1.2.3
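Note on the new jblas dependency: jblas supplies dense linear algebra backed by native BLAS/LAPACK, and the ALS code and the data generators later in this series build on its DoubleMatrix type. A minimal sketch of the kind of call the dependency enables; the matrix sizes here are illustrative and nothing in this sketch is part of the patch:

    import org.jblas.DoubleMatrix

    object JblasSketch {
      def main(args: Array[String]): Unit = {
        val a = DoubleMatrix.randn(4, 2) // 4x2 matrix of standard Gaussian draws
        val b = DoubleMatrix.randn(2, 3) // 2x3 matrix
        val c = a.mmul(b)                // dense matrix multiply, yielding a 4x3 result
        println(c.rows + " x " + c.columns)
      }
    }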
From 9a444cffe74374f0d764d1ed8197423e40529f24 Mon Sep 17 00:00:00 2001
From: Matei Zaharia
Date: Wed, 31 Jul 2013 11:28:39 -0700
Subject: Use the Char version of split() instead of the String one for
 efficiency

---
 mllib/src/main/scala/spark/mllib/util/MLUtils.scala | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mllib/src/main/scala/spark/mllib/util/MLUtils.scala b/mllib/src/main/scala/spark/mllib/util/MLUtils.scala
index b5e564df6d..25d9673004 100644
--- a/mllib/src/main/scala/spark/mllib/util/MLUtils.scala
+++ b/mllib/src/main/scala/spark/mllib/util/MLUtils.scala
@@ -38,9 +38,9 @@ object MLUtils {
    */
   def loadLabeledData(sc: SparkContext, dir: String): RDD[(Double, Array[Double])] = {
     sc.textFile(dir).map { line =>
-      val parts = line.split(",")
+      val parts = line.split(',')
       val label = parts(0).toDouble
-      val features = parts(1).trim().split(" ").map(_.toDouble)
+      val features = parts(1).trim().split(' ').map(_.toDouble)
       (label, features)
     }
   }
-- cgit v1.2.3
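Note on the patch above: String#split(String) treats its argument as a regular expression, so every call pays regex overhead, while the Char overload does a plain character scan. A minimal sketch of the two overloads side by side; the sample line is illustrative and not taken from the patch:

    object SplitSketch {
      def main(args: Array[String]): Unit = {
        val line = "1.0,2.5 3.5 4.5"
        val byRegex = line.split(",")        // String overload: argument handled as a regex
        val byChar  = line.split(',')        // Char overload: simple scan, no regex machinery
        assert(byRegex.sameElements(byChar)) // identical output for a plain one-char delimiter
      }
    }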
From f607ffb9e1f799d73818f1d37c633007a6b900fb Mon Sep 17 00:00:00 2001
From: Matei Zaharia
Date: Wed, 31 Jul 2013 14:31:07 -0700
Subject: Added data generator for K-means

Also made it possible to specify the number of runs in KMeans.main().
---
 .../main/scala/spark/mllib/clustering/KMeans.scala |  7 +-
 .../spark/mllib/util/KMeansDataGenerator.scala     | 80 ++++++++++++++++++++++
 2 files changed, 84 insertions(+), 3 deletions(-)
 create mode 100644 mllib/src/main/scala/spark/mllib/util/KMeansDataGenerator.scala

diff --git a/mllib/src/main/scala/spark/mllib/clustering/KMeans.scala b/mllib/src/main/scala/spark/mllib/clustering/KMeans.scala
index d875d6de50..a2ed42d7a5 100644
--- a/mllib/src/main/scala/spark/mllib/clustering/KMeans.scala
+++ b/mllib/src/main/scala/spark/mllib/clustering/KMeans.scala
@@ -315,14 +315,15 @@ object KMeans {
   }
 
   def main(args: Array[String]) {
-    if (args.length != 4) {
-      println("Usage: KMeans <master> <input_file> <k> <max_iterations>")
+    if (args.length < 4) {
+      println("Usage: KMeans <master> <input_file> <k> <max_iterations> [<runs>]")
       System.exit(1)
     }
     val (master, inputFile, k, iters) = (args(0), args(1), args(2).toInt, args(3).toInt)
+    val runs = if (args.length >= 5) args(4).toInt else 1
     val sc = new SparkContext(master, "KMeans")
     val data = sc.textFile(inputFile).map(line => line.split(' ').map(_.toDouble))
-    val model = KMeans.train(data, k, iters)
+    val model = KMeans.train(data, k, iters, runs)
     val cost = model.computeCost(data)
     println("Cluster centers:")
     for (c <- model.clusterCenters) {
diff --git a/mllib/src/main/scala/spark/mllib/util/KMeansDataGenerator.scala b/mllib/src/main/scala/spark/mllib/util/KMeansDataGenerator.scala
new file mode 100644
index 0000000000..8f95cf7479
--- /dev/null
+++ b/mllib/src/main/scala/spark/mllib/util/KMeansDataGenerator.scala
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package spark.mllib.util
+
+import scala.util.Random
+
+import spark.{RDD, SparkContext}
+
+object KMeansDataGenerator {
+
+  /**
+   * Generate an RDD containing test data for KMeans. This function chooses k cluster centers
+   * from a d-dimensional Gaussian distribution scaled by factor r, then creates a Gaussian
+   * cluster with scale 1 around each center.
+   *
+   * @param sc SparkContext to use for creating the RDD
+   * @param numPoints Number of points that will be contained in the RDD
+   * @param k Number of clusters
+   * @param d Number of dimensions
+   * @param r Scaling factor for the distribution of the initial centers
+   * @param numPartitions Number of partitions of the generated RDD; default 2
+   */
+  def generateKMeansRDD(
+      sc: SparkContext,
+      numPoints: Int,
+      k: Int,
+      d: Int,
+      r: Double,
+      numPartitions: Int = 2)
+    : RDD[Array[Double]] =
+  {
+    // First, generate some centers
+    val rand = new Random(42)
+    val centers = Array.fill(k)(Array.fill(d)(rand.nextGaussian() * r))
+    // Then generate points around each center
+    sc.parallelize(0 until numPoints, numPartitions).map { idx =>
+      val center = centers(idx % k)
+      val rand2 = new Random(42 + idx)
+      Array.tabulate(d)(i => center(i) + rand2.nextGaussian())
+    }
+  }
+
+  def main(args: Array[String]) {
+    if (args.length < 6) {
+      println("Usage: KMeansGenerator " +
+        "<master> <output_dir> <num_points> <k> <d> <r> [<num_partitions>]")
+      System.exit(1)
+    }
+
+    val sparkMaster = args(0)
+    val outputPath = args(1)
+    val numPoints = args(2).toInt
+    val k = args(3).toInt
+    val d = args(4).toInt
+    val r = args(5).toDouble
+    val parts = if (args.length >= 7) args(6).toInt else 2
+
+    val sc = new SparkContext(sparkMaster, "KMeansDataGenerator")
+    val data = generateKMeansRDD(sc, numPoints, k, d, r, parts)
+    data.map(_.mkString(" ")).saveAsTextFile(outputPath)
+
+    System.exit(0)
+  }
+}
-- cgit v1.2.3
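Note: besides the command-line entry point, generateKMeansRDD can be called directly. A minimal sketch against the API defined in the patch above; the master URL and output path are placeholders, not values from the patch:

    import spark.SparkContext
    import spark.mllib.util.KMeansDataGenerator

    object GenerateKMeansData {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext("local", "KMeansDataGeneratorSketch")
        // 10000 points in 3 dimensions around k = 5 centers, centers scaled by r = 4.0
        val points = KMeansDataGenerator.generateKMeansRDD(sc, 10000, 5, 3, 4.0)
        points.map(_.mkString(" ")).saveAsTextFile("/tmp/kmeans_data") // placeholder path
        sc.stop()
      }
    }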
From 52dba89261ee6dddafff5c746322980567252843 Mon Sep 17 00:00:00 2001
From: Matei Zaharia
Date: Wed, 31 Jul 2013 23:08:12 -0700
Subject: Turn on caching in KMeans.main

---
 mllib/src/main/scala/spark/mllib/clustering/KMeans.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mllib/src/main/scala/spark/mllib/clustering/KMeans.scala b/mllib/src/main/scala/spark/mllib/clustering/KMeans.scala
index a2ed42d7a5..b402c71ed2 100644
--- a/mllib/src/main/scala/spark/mllib/clustering/KMeans.scala
+++ b/mllib/src/main/scala/spark/mllib/clustering/KMeans.scala
@@ -322,7 +322,7 @@ object KMeans {
     val (master, inputFile, k, iters) = (args(0), args(1), args(2).toInt, args(3).toInt)
     val runs = if (args.length >= 5) args(4).toInt else 1
     val sc = new SparkContext(master, "KMeans")
-    val data = sc.textFile(inputFile).map(line => line.split(' ').map(_.toDouble))
+    val data = sc.textFile(inputFile).map(line => line.split(' ').map(_.toDouble)).cache()
     val model = KMeans.train(data, k, iters, runs)
     val cost = model.computeCost(data)
     println("Cluster centers:")
-- cgit v1.2.3
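Note on the caching change above: KMeans makes a full pass over the input once per iteration, and computeCost makes one more, so without cache() the text file would be re-read and re-parsed from storage on every pass. A minimal sketch of the effect, assuming a space-separated input file; the path is a placeholder:

    import spark.SparkContext

    object CacheSketch {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext("local", "CacheSketch")
        val data = sc.textFile("/tmp/kmeans_data")            // placeholder input path
          .map(line => line.split(' ').map(_.toDouble))
          .cache()                                            // keep parsed vectors in memory
        // Each action below stands in for one training pass: the first one parses
        // the text, later ones read the cached arrays instead.
        val firstPass = data.count()
        val secondPass = data.count()
        println(firstPass + " " + secondPass)
        sc.stop()
      }
    }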
From abfa9e6f708b46894be1972f46efe542578a30f1 Mon Sep 17 00:00:00 2001
From: Matei Zaharia
Date: Fri, 2 Aug 2013 16:17:32 -0700
Subject: Increase Kryo buffer size in ALS since some arrays become big

---
 mllib/src/main/scala/spark/mllib/recommendation/ALS.scala | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mllib/src/main/scala/spark/mllib/recommendation/ALS.scala b/mllib/src/main/scala/spark/mllib/recommendation/ALS.scala
index 7281b2fcb9..6ecf0151a1 100644
--- a/mllib/src/main/scala/spark/mllib/recommendation/ALS.scala
+++ b/mllib/src/main/scala/spark/mllib/recommendation/ALS.scala
@@ -418,6 +418,7 @@ object ALS {
     System.setProperty("spark.serializer", "spark.KryoSerializer")
     System.setProperty("spark.kryo.registrator", classOf[ALSRegistrator].getName)
     System.setProperty("spark.kryo.referenceTracking", "false")
+    System.setProperty("spark.kryoserializer.buffer.mb", "8")
     System.setProperty("spark.locality.wait", "10000")
     val sc = new SparkContext(master, "ALS")
     val ratings = sc.textFile(ratingsFile).map { line =>
-- cgit v1.2.3

From 4ab4df5edbc1bded810a8a3e1dfc7f8ae40a7c30 Mon Sep 17 00:00:00 2001
From: Ginger Smith
Date: Fri, 2 Aug 2013 22:22:36 -0700
Subject: adding matrix factorization data generator

---
 .../scala/spark/mllib/util/MFDataGenerator.scala | 105 +++++++++++++++++++++
 1 file changed, 105 insertions(+)
 create mode 100644 mllib/src/main/scala/spark/mllib/util/MFDataGenerator.scala

diff --git a/mllib/src/main/scala/spark/mllib/util/MFDataGenerator.scala b/mllib/src/main/scala/spark/mllib/util/MFDataGenerator.scala
new file mode 100644
index 0000000000..8637d27cd0
--- /dev/null
+++ b/mllib/src/main/scala/spark/mllib/util/MFDataGenerator.scala
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package spark.mllib.recommendation
+
+import scala.util.Random
+
+import org.jblas.DoubleMatrix
+
+import spark.{RDD, SparkContext}
+import spark.mllib.util.MLUtils
+
+
+object MFDataGenerator{
+
+  /**
+   * Generate RDD(s) containing data for Matrix Factorization. This function chooses
+   * positive labels with probability `probOne` and scales positive examples by `eps`.
+   *
+   * @param sc SparkContext to use for creating the RDD.
+   * @param outputPath Directory to save output.
+   * @param m Number of rows in data matrix.
+   * @param n Number of columns in data matrix.
+   * @param rank Underlying rank of data matrix.
+   * @param tr_samp_fact Oversampling factor.
+   * @param noise Boolean value - whether to add gaussian noise to training data.
+   * @param sigma Standard deviation of added gaussian noise.
+   * @param test Boolean value - whether to create testing RDD.
+   * @param te_samp_fact Percentage of training data to use as test data.
+   */
+
+  def main(args: Array[String]) {
+    if (args.length != 10) {
+      println("Usage: MFGenerator " +
+        "<master> <outputDir> <m> <n> <rank> <trainSampFact> <noise> <sigma> <test> <testSampFact>")
+      System.exit(1)
+    }
+
+    val sparkMaster: String = args(0)
+    val outputPath: String = args(1)
+    val m: Int = if (args.length > 2) args(2).toInt else 100
+    val n: Int = if (args.length > 3) args(3).toInt else 100
+    val rank: Int = if (args.length > 4) args(4).toInt else 10
+    val tr_samp_fact: Double = if (args.length > 5) args(5).toDouble else 1.0
+    val noise: Boolean = if (args.length > 6) args(6).toBoolean else false
+    val sigma: Double = if (args.length > 7) args(7).toDouble else 0.1
+    val test: Boolean = if (args.length > 8) args(8).toBoolean else false
+    val te_samp_fact: Double = if (args.length > 9) args(9).toDouble else 0.1
+
+    val sc = new SparkContext(sparkMaster, "MFDataGenerator")
+
+    val A = DoubleMatrix.randn(m,rank)
+    val B = DoubleMatrix.randn(rank,n)
+    val z = 1/(scala.math.sqrt(scala.math.sqrt(rank)))
+    A.mmuli(z)
+    B.mmuli(z)
+    val fullData = A.mmul(B)
+
+    val df = rank*(m+n-rank)
+    val sampsize = scala.math.min(scala.math.round(tr_samp_fact*df), scala.math.round(.99*m*n)).toInt
+    val rand = new Random()
+    val mn = m*n
+    val shuffled = rand.shuffle(1 to mn toIterable)
+
+    val omega = shuffled.slice(0,sampsize)
+    val ordered = omega.sortWith(_ < _).toArray
+    val trainData: RDD[(Int, Int, Double)] = sc.parallelize(ordered)
+      .map(x => (fullData.indexRows(x-1),fullData.indexColumns(x-1),fullData.get(x-1)))
+
+    // optionally add gaussian noise
+    if(noise){
+      trainData.map(x => (x._1,x._2,x._3+rand.nextGaussian*sigma))
+    }
+
+    trainData.map(x => x._1 + "," + x._2 + "," + x._3).saveAsTextFile(outputPath)
+
+    // optionally generate testing data
+    if(test){
+      val test_sampsize = scala.math
+        .min(scala.math.round(sampsize*te_samp_fact),scala.math.round(mn-sampsize))
+        .toInt
+      val test_omega = shuffled.slice(sampsize,sampsize+test_sampsize)
+      val test_ordered = test_omega.sortWith(_ < _).toArray
+      val testData: RDD[(Int, Int, Double)] = sc.parallelize(test_ordered)
+        .map(x=> (fullData.indexRows(x-1),fullData.indexColumns(x-1),fullData.get(x-1)))
+      testData.map(x => x._1 + "," + x._2 + "," + x._3).saveAsTextFile(outputPath)
+    }
+
+  sc.stop()
+  }
+}
\ No newline at end of file
-- cgit v1.2.3
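Note on the generator above: an m x n matrix of rank r has r*(m+n-r) degrees of freedom, and the generator samples tr_samp_fact times that many entries, capped at 99% of the matrix. A worked sketch of that arithmetic, using the generator's own default values:

    object SampleSizeSketch {
      def main(args: Array[String]): Unit = {
        val (m, n, rank) = (100, 100, 10)  // the generator's defaults
        val trSampFact = 1.0
        val df = rank * (m + n - rank)     // 10 * 190 = 1900 degrees of freedom
        val sampSize = math.min(math.round(trSampFact * df), math.round(0.99 * m * n)).toInt
        println(sampSize + " of " + (m * n) + " entries sampled") // 1900 of 10000
      }
    }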
From 8c8947e2b66169dddb828b801ffaa43cc400b8a5 Mon Sep 17 00:00:00 2001
From: Ginger Smith
Date: Mon, 5 Aug 2013 11:22:18 -0700
Subject: fixing formatting

---
 .../scala/spark/mllib/util/MFDataGenerator.scala | 39 +++++++++++++---------
 1 file changed, 23 insertions(+), 16 deletions(-)

diff --git a/mllib/src/main/scala/spark/mllib/util/MFDataGenerator.scala b/mllib/src/main/scala/spark/mllib/util/MFDataGenerator.scala
index 8637d27cd0..1d2b5c89f0 100644
--- a/mllib/src/main/scala/spark/mllib/util/MFDataGenerator.scala
+++ b/mllib/src/main/scala/spark/mllib/util/MFDataGenerator.scala
@@ -24,25 +24,32 @@ import org.jblas.DoubleMatrix
 import spark.{RDD, SparkContext}
 import spark.mllib.util.MLUtils
 
+/**
+* Generate RDD(s) containing data for Matrix Factorization.
+*
+* This method samples training entries according to the oversampling factor
+* 'tr_samp_fact', which is a multiplicative factor of the number of
+* degrees of freedom of the matrix: rank*(m+n-rank).
+*
+* It optionally samples entries for a testing matrix using
+* 'te_samp_fact', the percentage of the number of training entries
+* to use for testing.
+*
+* This method takes the following inputs:
+* sparkMaster (String) The master URL.
+* outputPath (String) Directory to save output.
+* m (Int) Number of rows in data matrix.
+* n (Int) Number of columns in data matrix.
+* rank (Int) Underlying rank of data matrix.
+* tr_samp_fact (Double) Oversampling factor.
+* noise (Boolean) Whether to add gaussian noise to training data.
+* sigma (Double) Standard deviation of added gaussian noise.
+* test (Boolean) Whether to create testing RDD.
+* te_samp_fact (Double) Percentage of training data to use as test data.
+*/
 
 object MFDataGenerator{
 
-  /**
-   * Generate RDD(s) containing data for Matrix Factorization. This function chooses
-   * positive labels with probability `probOne` and scales positive examples by `eps`.
-   *
-   * @param sc SparkContext to use for creating the RDD.
-   * @param outputPath Directory to save output.
-   * @param m Number of rows in data matrix.
-   * @param n Number of columns in data matrix.
-   * @param rank Underlying rank of data matrix.
-   * @param tr_samp_fact Oversampling factor.
-   * @param noise Boolean value - whether to add gaussian noise to training data.
-   * @param sigma Standard deviation of added gaussian noise.
-   * @param test Boolean value - whether to create testing RDD.
-   * @param te_samp_fact Percentage of training data to use as test data.
-   */
-
   def main(args: Array[String]) {
     if (args.length != 10) {
       println("Usage: MFGenerator " +
-- cgit v1.2.3

From bf7033f3ebf9315ccf9aba09a6e702c3a671fd8d Mon Sep 17 00:00:00 2001
From: Ginger Smith
Date: Mon, 5 Aug 2013 21:26:24 -0700
Subject: fixing formatting, style, and input

---
 .../scala/spark/mllib/util/MFDataGenerator.scala | 73 +++++++++++-----------
 1 file changed, 37 insertions(+), 36 deletions(-)

diff --git a/mllib/src/main/scala/spark/mllib/util/MFDataGenerator.scala b/mllib/src/main/scala/spark/mllib/util/MFDataGenerator.scala
index 1d2b5c89f0..88992cde0c 100644
--- a/mllib/src/main/scala/spark/mllib/util/MFDataGenerator.scala
+++ b/mllib/src/main/scala/spark/mllib/util/MFDataGenerator.scala
@@ -28,32 +28,32 @@ import spark.mllib.util.MLUtils
 * Generate RDD(s) containing data for Matrix Factorization.
 *
 * This method samples training entries according to the oversampling factor
-* 'tr_samp_fact', which is a multiplicative factor of the number of
+* 'trainSampFact', which is a multiplicative factor of the number of
 * degrees of freedom of the matrix: rank*(m+n-rank).
 *
 * It optionally samples entries for a testing matrix using
-* 'te_samp_fact', the percentage of the number of training entries
+* 'testSampFact', the percentage of the number of training entries
 * to use for testing.
 *
 * This method takes the following inputs:
-* sparkMaster (String) The master URL.
-* outputPath (String) Directory to save output.
-* m (Int) Number of rows in data matrix.
-* n (Int) Number of columns in data matrix.
-* rank (Int) Underlying rank of data matrix.
-* tr_samp_fact (Double) Oversampling factor.
-* noise (Boolean) Whether to add gaussian noise to training data.
-* sigma (Double) Standard deviation of added gaussian noise.
-* test (Boolean) Whether to create testing RDD.
-* te_samp_fact (Double) Percentage of training data to use as test data.
+*   sparkMaster    (String) The master URL.
+*   outputPath     (String) Directory to save output.
+*   m              (Int) Number of rows in data matrix.
+*   n              (Int) Number of columns in data matrix.
+*   rank           (Int) Underlying rank of data matrix.
+*   trainSampFact  (Double) Oversampling factor.
+*   noise          (Boolean) Whether to add gaussian noise to training data.
+*   sigma          (Double) Standard deviation of added gaussian noise.
+*   test           (Boolean) Whether to create testing RDD.
+*   testSampFact   (Double) Percentage of training data to use as test data.
 */
 
 object MFDataGenerator{
 
   def main(args: Array[String]) {
-    if (args.length != 10) {
-      println("Usage: MFGenerator " +
-        "<master> <outputDir> <m> <n> <rank> <trainSampFact> <noise> <sigma> <test> <testSampFact>")
+    if (args.length < 2) {
+      println("Usage: MFDataGenerator " +
+        "<master> <outputDir> [m] [n] [rank] [trainSampFact] [noise] [sigma] [test] [testSampFact]")
       System.exit(1)
     }
 
@@ -62,51 +62,52 @@ object MFDataGenerator{
     val m: Int = if (args.length > 2) args(2).toInt else 100
     val n: Int = if (args.length > 3) args(3).toInt else 100
     val rank: Int = if (args.length > 4) args(4).toInt else 10
-    val tr_samp_fact: Double = if (args.length > 5) args(5).toDouble else 1.0
+    val trainSampFact: Double = if (args.length > 5) args(5).toDouble else 1.0
     val noise: Boolean = if (args.length > 6) args(6).toBoolean else false
     val sigma: Double = if (args.length > 7) args(7).toDouble else 0.1
     val test: Boolean = if (args.length > 8) args(8).toBoolean else false
-    val te_samp_fact: Double = if (args.length > 9) args(9).toDouble else 0.1
+    val testSampFact: Double = if (args.length > 9) args(9).toDouble else 0.1
 
     val sc = new SparkContext(sparkMaster, "MFDataGenerator")
 
-    val A = DoubleMatrix.randn(m,rank)
-    val B = DoubleMatrix.randn(rank,n)
-    val z = 1/(scala.math.sqrt(scala.math.sqrt(rank)))
+    val A = DoubleMatrix.randn(m, rank)
+    val B = DoubleMatrix.randn(rank, n)
+    val z = 1 / (scala.math.sqrt(scala.math.sqrt(rank)))
     A.mmuli(z)
     B.mmuli(z)
     val fullData = A.mmul(B)
 
-    val df = rank*(m+n-rank)
-    val sampsize = scala.math.min(scala.math.round(tr_samp_fact*df), scala.math.round(.99*m*n)).toInt
+    val df = rank * (m + n - rank)
+    val sampSize = scala.math.min(scala.math.round(trainSampFact * df),
+      scala.math.round(.99 * m * n)).toInt
     val rand = new Random()
-    val mn = m*n
+    val mn = m * n
     val shuffled = rand.shuffle(1 to mn toIterable)
 
-    val omega = shuffled.slice(0,sampsize)
+    val omega = shuffled.slice(0, sampSize)
     val ordered = omega.sortWith(_ < _).toArray
     val trainData: RDD[(Int, Int, Double)] = sc.parallelize(ordered)
-      .map(x => (fullData.indexRows(x-1),fullData.indexColumns(x-1),fullData.get(x-1)))
+      .map(x => (fullData.indexRows(x - 1), fullData.indexColumns(x - 1), fullData.get(x - 1)))
 
     // optionally add gaussian noise
-    if(noise){
-      trainData.map(x => (x._1,x._2,x._3+rand.nextGaussian*sigma))
+    if (noise) {
+      trainData.map(x => (x._1, x._2, x._3 + rand.nextGaussian * sigma))
     }
 
     trainData.map(x => x._1 + "," + x._2 + "," + x._3).saveAsTextFile(outputPath)
 
     // optionally generate testing data
-    if(test){
-      val test_sampsize = scala.math
-        .min(scala.math.round(sampsize*te_samp_fact),scala.math.round(mn-sampsize))
-        .toInt
-      val test_omega = shuffled.slice(sampsize,sampsize+test_sampsize)
-      val test_ordered = test_omega.sortWith(_ < _).toArray
-      val testData: RDD[(Int, Int, Double)] = sc.parallelize(test_ordered)
-        .map(x=> (fullData.indexRows(x-1),fullData.indexColumns(x-1),fullData.get(x-1)))
+    if (test) {
+      val testSampSize = scala.math
+        .min(scala.math.round(sampSize * testSampFact),scala.math.round(mn - sampSize)).toInt
+      val testOmega = shuffled.slice(sampSize, sampSize + testSampSize)
+      val testOrdered = testOmega.sortWith(_ < _).toArray
+      val testData: RDD[(Int, Int, Double)] = sc.parallelize(testOrdered)
+        .map(x => (fullData.indexRows(x - 1), fullData.indexColumns(x - 1), fullData.get(x - 1)))
       testData.map(x => x._1 + "," + x._2 + "," + x._3).saveAsTextFile(outputPath)
     }
 
-  sc.stop()
+    sc.stop()
+
   }
 }
\ No newline at end of file
-- cgit v1.2.3