author     Sandeep Singh <sandeep@techaddict.me>    2016-06-10 13:06:51 -0700
committer  Reynold Xin <rxin@databricks.com>        2016-06-10 13:06:51 -0700
commit     865ec32dd997e63aea01a871d1c7b4947f43c111 (patch)
tree       62ddb4dba84262c8efe35d1c2d9157c7b2a1624f
parent     667d4ea7b35f285954ea7cb719b7c80581e31f4d (diff)
[MINOR][X][X] Replace all occurrences of None: Option with Option.empty
## What changes were proposed in this pull request?

Replace all occurrences of `None: Option[X]` with `Option.empty[X]`.

## How was this patch tested?

Existing tests.

Author: Sandeep Singh <sandeep@techaddict.me>

Closes #13591 from techaddict/minor-7.
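Both spellings produce the same `None` value typed as `Option[X]`; the new form simply uses the standard-library factory instead of a type ascription. A minimal sketch, not part of this patch (the object name `OptionEmptyDemo` and its values are illustrative), showing the equivalence and the fold-accumulator pattern that most of the touched call sites use:

```scala
object OptionEmptyDemo {
  def main(args: Array[String]): Unit = {
    // Two ways to spell "an empty Option[Int]":
    val byAscription = None: Option[Int]  // type ascription on the None singleton
    val byFactory    = Option.empty[Int]  // standard-library factory, the form this patch adopts

    // Same value, same static type.
    assert(byAscription == byFactory)

    // The explicit element type matters where inference needs help, for example
    // as a fold accumulator, the pattern used in TreeNode.find/collectFirst below:
    val firstMatch = List(1, 2, 3).foldLeft(Option.empty[Int]) { (acc, n) =>
      acc.orElse(if (n > 1) Some(n) else None)
    }
    println(firstMatch) // Some(2)
  }
}
```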
 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala              |  4 ++--
 sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala                          |  2 +-
 sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala |  2 +-
 sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ShuffleExchange.scala       |  2 +-
 sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala                   | 10 +++++-----
 streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala     |  2 +-
 6 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
index f924efe6e6..3cc7a1a3ca 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala
@@ -105,7 +105,7 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {
    */
   def find(f: BaseType => Boolean): Option[BaseType] = f(this) match {
     case true => Some(this)
-    case false => children.foldLeft(None: Option[BaseType]) { (l, r) => l.orElse(r.find(f)) }
+    case false => children.foldLeft(Option.empty[BaseType]) { (l, r) => l.orElse(r.find(f)) }
   }
 
   /**
@@ -165,7 +165,7 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {
   def collectFirst[B](pf: PartialFunction[BaseType, B]): Option[B] = {
     val lifted = pf.lift
     lifted(this).orElse {
-      children.foldLeft(None: Option[B]) { (l, r) => l.orElse(r.collectFirst(pf)) }
+      children.foldLeft(Option.empty[B]) { (l, r) => l.orElse(r.collectFirst(pf)) }
     }
   }
 
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
index 78b74f948e..1c2003c18e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
@@ -503,7 +503,7 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
   private def insertInto(tableIdent: TableIdentifier): Unit = {
     assertNotBucketed("insertInto")
     assertNotStreaming("insertInto() can only be called on non-continuous queries")
-    val partitions = normalizedParCols.map(_.map(col => col -> (None: Option[String])).toMap)
+    val partitions = normalizedParCols.map(_.map(col => col -> (Option.empty[String])).toMap)
     val overwrite = mode == SaveMode.Overwrite
 
     df.sparkSession.sessionState.executePlan(
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala
index 66753fa7f2..865e406ce2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala
@@ -169,7 +169,7 @@ case class CreateDataSourceTableAsSelectCommand(
         options
       }
 
-    var existingSchema = None: Option[StructType]
+    var existingSchema = Option.empty[StructType]
     if (sparkSession.sessionState.catalog.tableExists(tableIdent)) {
       // Check if we need to throw an exception or just return.
       mode match {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ShuffleExchange.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ShuffleExchange.scala
index e18b59f49b..afe0fbea73 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ShuffleExchange.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ShuffleExchange.scala
@@ -129,7 +129,7 @@ case class ShuffleExchange(
 object ShuffleExchange {
 
   def apply(newPartitioning: Partitioning, child: SparkPlan): ShuffleExchange = {
-    ShuffleExchange(newPartitioning, child, coordinator = None: Option[ExchangeCoordinator])
+    ShuffleExchange(newPartitioning, child, coordinator = Option.empty[ExchangeCoordinator])
   }
 
   /**
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
index 9771b2314a..e6c9c5d4d9 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
@@ -147,11 +147,11 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
 
   test("save and load case class RDD with `None`s as orc") {
     val data = (
-      None: Option[Int],
-      None: Option[Long],
-      None: Option[Float],
-      None: Option[Double],
-      None: Option[Boolean]
+      Option.empty[Int],
+      Option.empty[Long],
+      Option.empty[Float],
+      Option.empty[Double],
+      Option.empty[Boolean]
     ) :: Nil
 
     withOrcFile(data) { file =>
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala
index c4bc5cf3f6..80c07958b4 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala
@@ -170,7 +170,7 @@ private[streaming] class WriteAheadLogBasedBlockHandler(
    */
   def storeBlock(blockId: StreamBlockId, block: ReceivedBlock): ReceivedBlockStoreResult = {
 
-    var numRecords = None: Option[Long]
+    var numRecords = Option.empty[Long]
     // Serialize the block so that it can be inserted into both
     val serializedBlock = block match {
       case ArrayBufferBlock(arrayBuffer) =>
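A general note on the `var numRecords` site above: when a mutable variable starts out empty, the element type has to be made explicit one way or the other, since plain `None` would be inferred as the too-narrow type `None.type` and a later `Some(...)` assignment would not compile. A minimal sketch, not from the patch (`VarOptionDemo` and the literal count are illustrative):

```scala
object VarOptionDemo {
  def main(args: Array[String]): Unit = {
    // Declared empty; Option.empty[Long] pins the type to Option[Long].
    // (`var numRecords = None` would infer None.type and reject the update below.)
    var numRecords = Option.empty[Long]

    // ...later, once the count is known:
    numRecords = Some(42L)
    println(numRecords) // Some(42)
  }
}
```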