Diffstat (limited to 'sql/core/src/test')
 sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala |  4 ----
 sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala                             | 18 +++++++++---------
 2 files changed, 9 insertions(+), 13 deletions(-)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
index d7d7176c48..200e356c72 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
@@ -77,8 +77,6 @@ class ParquetQuerySuite extends QueryTest with ParquetTest with SharedSQLContext
       val df = spark.read.parquet(path).cache()
       assert(df.count() == 1000)
       spark.range(10).write.mode("overwrite").parquet(path)
-      assert(df.count() == 1000)
-      spark.catalog.refreshByPath(path)
       assert(df.count() == 10)
       assert(spark.read.parquet(path).count() == 10)
     }
@@ -91,8 +89,6 @@ class ParquetQuerySuite extends QueryTest with ParquetTest with SharedSQLContext
       val df = spark.read.parquet(path).cache()
       assert(df.count() == 1000)
       spark.range(10).write.mode("append").parquet(path)
-      assert(df.count() == 1000)
-      spark.catalog.refreshByPath(path)
       assert(df.count() == 1010)
       assert(spark.read.parquet(path).count() == 1010)
     }
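
Note: both hunks above drop the stale-count assertion and the explicit spark.catalog.refreshByPath(path) call, since a write to the path is now expected to invalidate cached data derived from it on its own. A minimal standalone sketch of the behavior the updated tests assert (the local master, app name, and scratch path are illustrative assumptions, not part of this change):

import org.apache.spark.sql.SparkSession

object CacheInvalidationSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("cache-invalidation-sketch")
      .getOrCreate()

    // Hypothetical scratch location; the real tests use a managed temp path.
    val path = "/tmp/cache-invalidation-sketch"

    spark.range(1000).write.mode("overwrite").parquet(path)
    val df = spark.read.parquet(path).cache()
    assert(df.count() == 1000)

    // Overwriting the path is expected to invalidate the cached entry, so
    // the next count reflects the new data without an explicit
    // spark.catalog.refreshByPath(path).
    spark.range(10).write.mode("overwrite").parquet(path)
    assert(df.count() == 10)

    spark.stop()
  }
}

The same reasoning applies to the append hunk, where the cached count is expected to move from 1000 to 1010 immediately after the append.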
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
index 19835cd184..2eae66dda8 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
@@ -281,15 +281,15 @@ class InsertSuite extends DataSourceTest with SharedSQLContext {
""".stripMargin)
// jsonTable should be recached.
assertCached(sql("SELECT * FROM jsonTable"))
- // TODO we need to invalidate the cached data in InsertIntoHadoopFsRelation
-// // The cached data is the new data.
-// checkAnswer(
-// sql("SELECT a, b FROM jsonTable"),
-// sql("SELECT a * 2, b FROM jt").collect())
-//
-// // Verify uncaching
-// spark.catalog.uncacheTable("jsonTable")
-// assertCached(sql("SELECT * FROM jsonTable"), 0)
+
+ // The cached data is the new data.
+ checkAnswer(
+ sql("SELECT a, b FROM jsonTable"),
+ sql("SELECT a * 2, b FROM jt").collect())
+
+ // Verify uncaching
+ spark.catalog.uncacheTable("jsonTable")
+ assertCached(sql("SELECT * FROM jsonTable"), 0)
}
test("it's not allowed to insert into a relation that is not an InsertableRelation") {