From 86534d0f5255362618c05a07b0171ec35c915822 Mon Sep 17 00:00:00 2001 From: Michael Armbrust Date: Tue, 29 Jul 2014 18:20:51 -0700 Subject: [SPARK-2631][SQL] Use SQLConf to configure in-memory columnar caching Author: Michael Armbrust Closes #1638 from marmbrus/cachedConfig and squashes the following commits: 2362082 [Michael Armbrust] Use SQLConf to configure in-memory columnar caching --- sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala | 4 ++++ sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala | 2 -- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala index be8d4e15ec..5d85a0fd4e 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala @@ -22,6 +22,7 @@ import java.util.Properties import scala.collection.JavaConverters._ object SQLConf { + val COMPRESS_CACHED = "spark.sql.inMemoryColumnarStorage.compressed" val AUTO_BROADCASTJOIN_THRESHOLD = "spark.sql.autoBroadcastJoinThreshold" val SHUFFLE_PARTITIONS = "spark.sql.shuffle.partitions" val DEFAULT_SIZE_IN_BYTES = "spark.sql.defaultSizeInBytes" @@ -49,6 +50,9 @@ trait SQLConf { /** ************************ Spark SQL Params/Hints ******************* */ // TODO: refactor so that these hints accessors don't pollute the name space of SQLContext? + /** When true, tables cached using the in-memory columnar caching will be compressed. */ + private[spark] def useCompression: Boolean = get(COMPRESS_CACHED, "false").toBoolean + /** Number of partitions to use for shuffle operators. 
*/ private[spark] def numShufflePartitions: Int = get(SHUFFLE_PARTITIONS, "200").toInt diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala index a136c7b3ff..c2bdef7323 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala @@ -192,8 +192,6 @@ class SQLContext(@transient val sparkContext: SparkContext) currentTable.logicalPlan case _ => - val useCompression = - sparkContext.conf.getBoolean("spark.sql.inMemoryColumnarStorage.compressed", false) InMemoryRelation(useCompression, executePlan(currentTable).executedPlan) } -- cgit v1.2.3