path: root/sql/core/src/main
author    Michael Armbrust <michael@databricks.com>  2014-08-27 15:14:08 -0700
committer Michael Armbrust <michael@databricks.com>  2014-08-27 15:14:08 -0700
commit    7d2a7a91f263bb9fbf24dc4dbffde8fe5e2c7442 (patch)
tree      d42df0d5dfe23d3e4cea4f833a863afa1a0e6d13 /sql/core/src/main
parent    65253502b913f390b26b9b631380b2c6cf1ccdf7 (diff)
[SPARK-3235][SQL] Ensure in-memory tables don't always broadcast.
Author: Michael Armbrust <michael@databricks.com>

Closes #2147 from marmbrus/inMemDefaultSize and squashes the following commits:

5390360 [Michael Armbrust] Merge remote-tracking branch 'origin/master' into inMemDefaultSize
14204d3 [Michael Armbrust] Set the context before creating SparkLogicalPlans.
8da4414 [Michael Armbrust] Make sure we throw errors when leaf nodes fail to provide statistics
18ce029 [Michael Armbrust] Ensure in-memory tables don't always broadcast.
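For context: Spark SQL's planner chooses to broadcast one side of a join when that plan's estimated statistics.sizeInBytes falls under a size threshold (spark.sql.autoBroadcastJoinThreshold). The Scala sketch below is a simplified model of that decision, not the actual strategy code; the 10 MB value and all names in it are assumptions for illustration.

// Simplified model of the auto-broadcast decision (illustrative only).
object BroadcastDecisionSketch {
  // Assumed default, mirroring spark.sql.autoBroadcastJoinThreshold (10 MB).
  val autoBroadcastJoinThreshold: Long = 10L * 1024 * 1024

  // Broadcast a join side when its estimated size fits under the threshold.
  def shouldBroadcast(sizeInBytes: BigInt): Boolean =
    sizeInBytes <= BigInt(autoBroadcastJoinThreshold)

  def main(args: Array[String]): Unit = {
    // Before this patch, a cached table's leaf plan inherited default
    // statistics of 1 byte (see the note under the second hunk below),
    // so it always qualified for broadcast regardless of its real size.
    println(shouldBroadcast(BigInt(1)))                       // true
    println(shouldBroadcast(BigInt(10L * 1024 * 1024) + 1))   // false
  }
}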
Diffstat (limited to 'sql/core/src/main')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala                         | 4 +++-
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala | 3 +++
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala                | 2 +-
3 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index 6f0eed3f63..a75af94d29 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -89,8 +89,10 @@ class SQLContext(@transient val sparkContext: SparkContext)
    *
    * @group userf
    */
-  implicit def createSchemaRDD[A <: Product: TypeTag](rdd: RDD[A]) =
+  implicit def createSchemaRDD[A <: Product: TypeTag](rdd: RDD[A]) = {
+    SparkPlan.currentContext.set(self)
     new SchemaRDD(this, SparkLogicalPlan(ExistingRdd.fromProductRdd(rdd))(self))
+  }
 
   /**
    * :: DeveloperApi ::
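The hunk above pairs with the last file in this diff: SparkPlan's constructor captures SparkPlan.currentContext, so createSchemaRDD must populate that thread-local before eagerly building a SparkLogicalPlan ("Set the context before creating SparkLogicalPlans"). A minimal, self-contained sketch of that pattern, with Context standing in for the real SQLContext and all names illustrative:

// Thread-local hand-off, as used by SparkPlan.currentContext (sketch).
final class Context(val name: String)

object CurrentContext {
  private val holder = new ThreadLocal[Context]
  def set(ctx: Context): Unit = holder.set(ctx)
  def get(): Context = holder.get()
}

// A plan captures the context at construction time, so callers must set
// the thread-local first -- exactly what createSchemaRDD now does.
class PlanSketch {
  val context: Context = CurrentContext.get()
  require(context != null, "set the current context before building a plan")
}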
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala b/sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala
index 24e88eea31..bc36bacd00 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala
@@ -39,6 +39,9 @@ private[sql] case class InMemoryRelation(
     (private var _cachedColumnBuffers: RDD[Array[ByteBuffer]] = null)
   extends LogicalPlan with MultiInstanceRelation {
 
+  override lazy val statistics =
+    Statistics(sizeInBytes = child.sqlContext.defaultSizeInBytes)
+
   // If the cached column buffers were not passed in, we calculate them in the constructor.
   // As in Spark, the actual work of caching is lazy.
   if (_cachedColumnBuffers == null) {
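This override is the heart of the fix. LogicalPlan's default statistics multiply the children's sizes, and the product over an empty sequence is 1, so a leaf such as InMemoryRelation previously looked one byte large and was always picked for broadcast. Pinning it to defaultSizeInBytes, which defaults to just above the broadcast threshold, makes the planner conservative about cached tables. A tiny, runnable illustration of the empty-product pitfall, with an assumed 10 MB threshold:

// The empty-product pitfall behind SPARK-3235 (illustrative numbers).
object EmptyProductPitfall extends App {
  val childSizes: Seq[BigInt] = Seq.empty           // a leaf has no children
  val inheritedSize: BigInt = childSizes.product    // product of nothing == 1

  val threshold: BigInt = BigInt(10L * 1024 * 1024) // assumed broadcast threshold
  val defaultSizeInBytes: BigInt = threshold + 1    // conservative default

  assert(inheritedSize == 1)                        // tiny: always auto-broadcast
  assert(defaultSizeInBytes > threshold)            // large: never auto-broadcast
}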
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
index 7d33ea5b02..2b8913985b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
@@ -49,7 +49,7 @@ abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging with Serializable {
    * populated by the query planning infrastructure.
    */
   @transient
-  protected val sqlContext = SparkPlan.currentContext.get()
+  protected[spark] val sqlContext = SparkPlan.currentContext.get()
 
   protected def sparkContext = sqlContext.sparkContext
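Widening sqlContext from protected to protected[spark] lets other code under the org.apache.spark package reach the field -- the new InMemoryRelation.statistics above reads child.sqlContext.defaultSizeInBytes from a class that is not a SparkPlan subclass -- while keeping it hidden from user code. A small, self-contained illustration of Scala's qualified access modifiers, using hypothetical package and class names:

// Qualified visibility in Scala (hypothetical names).
package org.example {
  class Plan {
    // Analogous to `protected[spark] val sqlContext`: readable anywhere
    // under org.example, invisible to code outside that package.
    protected[example] val context: String = "ctx"
  }
}

package org.example.columnar {
  import org.example.Plan

  class Relation(plan: Plan) {
    // Compiles because `columnar` sits inside org.example; a plain
    // `protected` member would not be reachable from a non-subclass here.
    def describe(): String = plan.context
  }
}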