author     Josh Rosen <joshrosen@databricks.com>    2016-11-01 16:23:47 -0700
committer  Reynold Xin <rxin@databricks.com>        2016-11-01 16:23:47 -0700
commit     6e6298154aba63831a292117797798131a646869 (patch)
tree       83851dd672c106050ad78f521e0b22807ad1b15c /sql/hive-thriftserver
parent     01dd0083011741c2bbe5ae1d2a25f2c9a1302b76 (diff)
[SPARK-17350][SQL] Disable default use of KryoSerializer in Thrift Server
In SPARK-4761 / #3621 (December 2014) we enabled Kryo serialization by default in the Spark Thrift Server. However, I don't think the original rationale for doing this still holds now that most Spark SQL serialization is performed via encoders and our UnsafeRow format.

In addition, using Kryo as the default serializer can introduce performance problems, because creating new KryoSerializer instances is expensive and we haven't performed instance-reuse optimizations in several code paths (including DirectTaskResult deserialization). Given all of this, I propose to revert to using JavaSerializer as the default serializer in the Thrift Server.

/cc liancheng

Author: Josh Rosen <joshrosen@databricks.com>

Closes #14906 from JoshRosen/disable-kryo-in-thriftserver.
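The instance-creation cost is the heart of the performance argument above. As a minimal illustration of the contrast between per-call instantiation and instance reuse (my sketch, not part of the patch; it uses Spark 2.x's developer-API KryoSerializer and SerializerInstance, but the object and method names are hypothetical):

import java.nio.ByteBuffer

import org.apache.spark.SparkConf
import org.apache.spark.serializer.KryoSerializer

object SerializerReuseSketch {
  val conf = new SparkConf()

  // Costly pattern: every call builds a fresh KryoSerializer (and, under the
  // hood, a fresh Kryo instance with its class registrations) just to decode
  // one payload. This is the shape of the problem on hot paths that haven't
  // been optimized for instance reuse, such as DirectTaskResult deserialization.
  def deserializePerCall(bytes: ByteBuffer): Seq[Int] =
    new KryoSerializer(conf).newInstance().deserialize[Seq[Int]](bytes)

  // Cheaper pattern: create the SerializerInstance once and reuse it across
  // calls. Note that SerializerInstance is not guaranteed to be thread-safe,
  // so reuse must stay confined to a single thread (or be pooled).
  private val reusable = new KryoSerializer(conf).newInstance()
  def deserializeReused(bytes: ByteBuffer): Seq[Int] =
    reusable.deserialize[Seq[Int]](bytes)
}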
Diffstat (limited to 'sql/hive-thriftserver')
-rw-r--r--  sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnv.scala | 10 ----------
1 file changed, 0 insertions(+), 10 deletions(-)
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnv.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnv.scala
index 638911599a..78a309497a 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnv.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnv.scala
@@ -19,8 +19,6 @@ package org.apache.spark.sql.hive.thriftserver
 
 import java.io.PrintStream
 
-import scala.collection.JavaConverters._
-
 import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.internal.Logging
 import org.apache.spark.sql.{SparkSession, SQLContext}
@@ -37,8 +35,6 @@ private[hive] object SparkSQLEnv extends Logging {
   def init() {
     if (sqlContext == null) {
       val sparkConf = new SparkConf(loadDefaults = true)
-      val maybeSerializer = sparkConf.getOption("spark.serializer")
-      val maybeKryoReferenceTracking = sparkConf.getOption("spark.kryo.referenceTracking")
       // If user doesn't specify the appName, we want to get [SparkSQL::localHostName] instead of
       // the default appName [SparkSQLCLIDriver] in cli or beeline.
       val maybeAppName = sparkConf
@@ -47,12 +43,6 @@ private[hive] object SparkSQLEnv extends Logging {
 
       sparkConf
         .setAppName(maybeAppName.getOrElse(s"SparkSQL::${Utils.localHostName()}"))
-        .set(
-          "spark.serializer",
-          maybeSerializer.getOrElse("org.apache.spark.serializer.KryoSerializer"))
-        .set(
-          "spark.kryo.referenceTracking",
-          maybeKryoReferenceTracking.getOrElse("false"))
 
       val sparkSession = SparkSession.builder.config(sparkConf).enableHiveSupport().getOrCreate()
       sparkContext = sparkSession.sparkContext
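With the defaults removed above, a deployment that still wants Kryo in the Thrift Server can opt in explicitly. A minimal sketch, reusing exactly the conf keys and fallback values that the deleted lines used to set (the same settings can also go in spark-defaults.conf, or be passed as --conf options when launching the server):

import org.apache.spark.SparkConf

// Restore the pre-patch behavior explicitly: these are the same keys and
// values that the removed code applied by default.
val sparkConf = new SparkConf(loadDefaults = true)
  .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
  .set("spark.kryo.referenceTracking", "false")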