diff options
author | Zheng RuiFeng <ruifengz@foxmail.com> | 2016-05-20 16:40:33 -0700 |
---|---|---|
committer | Andrew Or <andrew@databricks.com> | 2016-05-20 16:40:33 -0700 |
commit | 127bf1bb07967e2e4f99ad7abaa7f6fab3b3f407 (patch) | |
tree | a127031cd361df2f1d895cb11489f8e183c76f73 /sql | |
parent | 06c9f520714e07259c6f8ce6f9ea5a230a278cb5 (diff) | |
download | spark-127bf1bb07967e2e4f99ad7abaa7f6fab3b3f407.tar.gz spark-127bf1bb07967e2e4f99ad7abaa7f6fab3b3f407.tar.bz2 spark-127bf1bb07967e2e4f99ad7abaa7f6fab3b3f407.zip |
[SPARK-15031][EXAMPLE] Use SparkSession in examples
## What changes were proposed in this pull request?
Use `SparkSession` according to [SPARK-15031](https://issues.apache.org/jira/browse/SPARK-15031)
`MLLIB` is no longer recommended for use, so examples in `MLLIB` are ignored in this PR.
`StreamingContext` cannot be obtained directly from `SparkSession`, so examples in `Streaming` are ignored too.
cc andrewor14
## How was this patch tested?
manual tests with spark-submit
Author: Zheng RuiFeng <ruifengz@foxmail.com>
Closes #13164 from zhengruifeng/use_sparksession_ii.
Diffstat (limited to 'sql')
-rw-r--r-- | sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java | 6 |
1 files changed, 3 insertions, 3 deletions
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java index 35a9f44fec..1e8f1062c5 100644 --- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java +++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java @@ -168,8 +168,8 @@ public class JavaDataFrameSuite { Assert.assertEquals( new StructField("d", new ArrayType(DataTypes.StringType, true), true, Metadata.empty()), schema.apply("d")); - Assert.assertEquals(new StructField("e", DataTypes.createDecimalType(38,0), true, Metadata.empty()), - schema.apply("e")); + Assert.assertEquals(new StructField("e", DataTypes.createDecimalType(38,0), true, + Metadata.empty()), schema.apply("e")); Row first = df.select("a", "b", "c", "d", "e").first(); Assert.assertEquals(bean.getA(), first.getDouble(0), 0.0); // Now Java lists and maps are converted to Scala Seq's and Map's. Once we get a Seq below, @@ -189,7 +189,7 @@ public class JavaDataFrameSuite { for (int i = 0; i < d.length(); i++) { Assert.assertEquals(bean.getD().get(i), d.apply(i)); } - // Java.math.BigInteger is equavient to Spark Decimal(38,0) + // Java.math.BigInteger is equavient to Spark Decimal(38,0) Assert.assertEquals(new BigDecimal(bean.getE()), first.getDecimal(4)); } |