author     Liwei Lin <lwlin7@gmail.com>        2016-04-14 10:14:38 -0700
committer  Reynold Xin <rxin@databricks.com>   2016-04-14 10:14:38 -0700
commit     3e27940a19e7bab448f1af11d2065ecd1ec66197 (patch)
tree       76981c9be102eb396cb9be433b52143b18fd2005 /sql/core/src
parent     de2ad52855aee3c60bbc4642afb180d6fe62173b (diff)
[SPARK-14630][BUILD][CORE][SQL][STREAMING] Code style: public abstract methods should have explicit return types
## What changes were proposed in this pull request?

Currently many public abstract methods (in abstract classes as well as traits) don't declare return types explicitly, such as in [o.a.s.streaming.dstream.InputDStream](https://github.com/apache/spark/blob/master/streaming/src/main/scala/org/apache/spark/streaming/dstream/InputDStream.scala#L110):

```scala
def start() // should be: def start(): Unit
def stop()  // should be: def stop(): Unit
```

These methods exist in core, sql, and streaming; this PR fixes them.

## How was this patch tested?

N/A

## Which Scala style rule led to the changes?

The rule was added separately in https://github.com/apache/spark/pull/12396.

Author: Liwei Lin <lwlin7@gmail.com>

Closes #12389 from lw-lin/public-abstract-methods.
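For context, here is a minimal sketch of the style rule in action. The trait and method names are made up for illustration and do not appear in this PR: with Scala's procedure syntax the `Unit` return type is only implied, so a reader can't tell at a glance whether the method is meant to return a value; spelling out `: Unit` makes the contract explicit.

```scala
// Hypothetical trait illustrating the rule; `Sink`, `flush`, and `close`
// are invented for this sketch.
trait Sink {
  def flush()       // procedure syntax: ": Unit" is implied, easy to misread
  def close(): Unit // explicit return type, as required after SPARK-14630
}

object ConsoleSink extends Sink {
  override def flush(): Unit = println("flushed")
  override def close(): Unit = println("closed")
}
```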
Diffstat (limited to 'sql/core/src')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnAccessor.scala       2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBuilder.scala        4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala    2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/util/ContinuousQueryListener.scala            6
4 files changed, 7 insertions, 7 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnAccessor.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnAccessor.scala
index 78664baa56..7cde04b626 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnAccessor.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnAccessor.scala
@@ -38,7 +38,7 @@ private[columnar] trait ColumnAccessor {
def hasNext: Boolean
- def extractTo(row: MutableRow, ordinal: Int)
+ def extractTo(row: MutableRow, ordinal: Int): Unit
protected def underlyingBuffer: ByteBuffer
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBuilder.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBuilder.scala
index 9a173367f4..d30655e0c4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBuilder.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnBuilder.scala
@@ -28,12 +28,12 @@ private[columnar] trait ColumnBuilder {
/**
* Initializes with an approximate lower bound on the expected number of elements in this column.
*/
- def initialize(initialSize: Int, columnName: String = "", useCompression: Boolean = false)
+ def initialize(initialSize: Int, columnName: String = "", useCompression: Boolean = false): Unit
/**
* Appends `row(ordinal)` to the column builder.
*/
- def appendFrom(row: InternalRow, ordinal: Int)
+ def appendFrom(row: InternalRow, ordinal: Int): Unit
/**
* Column statistics information
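As a usage sketch of the `initialize`/`appendFrom` contract above: callers size the builder once, then append one field per row. Note that `ColumnBuilder` is `private[columnar]`, so this only illustrates the call shape from inside Spark; `buildColumn` is a made-up helper.

```scala
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.columnar.ColumnBuilder

// Made-up helper showing the call shape of the ColumnBuilder contract;
// only compiles inside Spark's columnar package, where the trait is visible.
def buildColumn(builder: ColumnBuilder, rows: Seq[InternalRow], ordinal: Int): Unit = {
  builder.initialize(rows.length)               // approximate lower bound on element count
  rows.foreach(builder.appendFrom(_, ordinal))  // appends row(ordinal) for each row
}
```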
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
index cc5327e0e2..9521506325 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
@@ -50,7 +50,7 @@ trait StateStore {
def get(key: UnsafeRow): Option[UnsafeRow]
/** Put a new value for a key. */
- def put(key: UnsafeRow, value: UnsafeRow)
+ def put(key: UnsafeRow, value: UnsafeRow): Unit
/**
* Remove keys that match the following condition.
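For orientation, a minimal sketch driving the `get`/`put` pair above. `putIfAbsent` is a made-up helper, and obtaining a concrete `StateStore` is elided since instances are managed internally by Spark.

```scala
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.execution.streaming.state.StateStore

// Made-up helper: write `value` only when `key` has no current value.
// Relies on get returning Option[UnsafeRow] and put returning Unit, as above.
def putIfAbsent(store: StateStore, key: UnsafeRow, value: UnsafeRow): Unit = {
  store.get(key) match {
    case Some(_) => ()                    // key already present: leave as-is
    case None    => store.put(key, value) // key absent: insert
  }
}
```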
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/util/ContinuousQueryListener.scala b/sql/core/src/main/scala/org/apache/spark/sql/util/ContinuousQueryListener.scala
index bf78be9d9f..ba1facf11b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/util/ContinuousQueryListener.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/util/ContinuousQueryListener.scala
@@ -37,7 +37,7 @@ abstract class ContinuousQueryListener {
* `DataFrameWriter.startStream()` returns the corresponding [[ContinuousQuery]]. Please
* don't block this method as it will block your query.
*/
- def onQueryStarted(queryStarted: QueryStarted)
+ def onQueryStarted(queryStarted: QueryStarted): Unit
/**
* Called when there is some status update (ingestion rate updated, etc.)
@@ -47,10 +47,10 @@ abstract class ContinuousQueryListener {
* may be changed before/when you process the event. E.g., you may find [[ContinuousQuery]]
* is terminated when you are processing [[QueryProgress]].
*/
- def onQueryProgress(queryProgress: QueryProgress)
+ def onQueryProgress(queryProgress: QueryProgress): Unit
/** Called when a query is stopped, with or without error */
- def onQueryTerminated(queryTerminated: QueryTerminated)
+ def onQueryTerminated(queryTerminated: QueryTerminated): Unit
}
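To show how the updated `ContinuousQueryListener` signatures read from the implementer's side, here is a minimal sketch. It assumes the `QueryStarted`, `QueryProgress`, and `QueryTerminated` event classes are importable from the listener's companion object, as the parameter types in the diff suggest; the class name `LoggingListener` is made up.

```scala
import org.apache.spark.sql.util.ContinuousQueryListener
import org.apache.spark.sql.util.ContinuousQueryListener.{QueryStarted, QueryProgress, QueryTerminated}

// Hypothetical listener; each override now states ": Unit" explicitly,
// matching the abstract declarations changed in this PR. The callbacks
// should not block, per the scaladoc in the diff above.
class LoggingListener extends ContinuousQueryListener {
  override def onQueryStarted(queryStarted: QueryStarted): Unit =
    println("query started")

  override def onQueryProgress(queryProgress: QueryProgress): Unit =
    println("query progress")

  override def onQueryTerminated(queryTerminated: QueryTerminated): Unit =
    println("query terminated")
}
```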