aboutsummaryrefslogtreecommitdiff
path: root/sql/core
diff options
context:
space:
mode:
authorJacky Li <jacky.likun@huawei.com>2015-02-26 10:40:58 -0800
committerReynold Xin <rxin@databricks.com>2015-02-26 10:40:58 -0800
commit2358657547016d647cdd2e2d363426fcd8d3e9ff (patch)
tree11ae09948aafbcd7866762ab2f00f1a77ff0bebf /sql/core
parentdf3d559b32f1ceb8ca3491e2a1169c56a6faab58 (diff)
downloadspark-2358657547016d647cdd2e2d363426fcd8d3e9ff.tar.gz
spark-2358657547016d647cdd2e2d363426fcd8d3e9ff.tar.bz2
spark-2358657547016d647cdd2e2d363426fcd8d3e9ff.zip
[SPARK-6007][SQL] Add numRows param in DataFrame.show()
It is useful to let the user decide the number of rows to show in DataFrame.show Author: Jacky Li <jacky.likun@huawei.com> Closes #4767 from jackylk/show and squashes the following commits: a0e0f4b [Jacky Li] fix testcase 7cdbe91 [Jacky Li] modify according to comment bb54537 [Jacky Li] for Java compatibility d7acc18 [Jacky Li] modify according to comments 981be52 [Jacky Li] add numRows param in DataFrame.show()
Diffstat (limited to 'sql/core')
-rw-r--r--sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala13
-rw-r--r--sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java9
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala5
3 files changed, 24 insertions, 3 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index f045da305c..060ab5e9a0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -159,9 +159,10 @@ class DataFrame protected[sql](
/**
* Internal API for Python
+ * @param numRows Number of rows to show
*/
- private[sql] def showString(): String = {
- val data = take(20)
+ private[sql] def showString(numRows: Int): String = {
+ val data = take(numRows)
val numCols = schema.fieldNames.length
// For cells that are beyond 20 characters, replace it with the first 17 and "..."
@@ -293,9 +294,15 @@ class DataFrame protected[sql](
* 1983 03 0.410516 0.442194
* 1984 04 0.450090 0.483521
* }}}
+ * @param numRows Number of rows to show
* @group basic
*/
- def show(): Unit = println(showString())
+ def show(numRows: Int): Unit = println(showString(numRows))
+
+ /**
+ * Displays the top 20 rows of [[DataFrame]] in a tabular form.
+ */
+ def show(): Unit = show(20)
/**
* Cartesian join with another [[DataFrame]].
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java
index c1c51f80d6..2d586f784a 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java
@@ -20,6 +20,7 @@ package test.org.apache.spark.sql;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
+import org.junit.Ignore;
import org.junit.Test;
import org.apache.spark.sql.*;
@@ -81,4 +82,12 @@ public class JavaDataFrameSuite {
df.groupBy().agg(countDistinct(col("key"), col("value")));
df.select(coalesce(col("key")));
}
+
+ @Ignore
+ public void testShow() {
+ // This test case is intentionally ignored, but kept to make sure it compiles correctly
+ DataFrame df = context.table("testData");
+ df.show();
+ df.show(1000);
+ }
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index c392a553c0..ff441ef26f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -441,4 +441,9 @@ class DataFrameSuite extends QueryTest {
checkAnswer(df.select(df("key")), testData.select('key).collect().toSeq)
}
+ ignore("show") {
+ // This test case is intentionally ignored, but kept to make sure it compiles correctly
+ testData.select($"*").show()
+ testData.select($"*").show(1000)
+ }
}