author    云峤 <chensong.cs@alibaba-inc.com>  2015-05-04 12:08:38 -0700
committer Reynold Xin <rxin@databricks.com>  2015-05-04 12:08:38 -0700
commit    f32e69ecc333867fc966f65cd0aeaeddd43e0945 (patch)
tree      2dbcc9704acb83d0a7b40f3cee8cc084e29f6dd9 /sql
parent    e0833c5958bbd73ff27cfe6865648d7b6e5a99bc (diff)
[SPARK-7319][SQL] Improve the output from DataFrame.show()
Author: 云峤 <chensong.cs@alibaba-inc.com>

Closes #5865 from kaka1992/df.show and squashes the following commits:

c79204b [云峤] Update
a1338f6 [云峤] Update python dataFrame show test and add empty df unit test.
734369c [云峤] Update python dataFrame show test and add empty df unit test.
84aec3e [云峤] Update python dataFrame show test and add empty df unit test.
159b3d5 [云峤] update
03ef434 [云峤] update
7394fd5 [云峤] update test show
ced487a [云峤] update pep8
b6e690b [云峤] Merge remote-tracking branch 'upstream/master' into df.show
30ac311 [云峤] [SPARK-7294] ADD BETWEEN
7d62368 [云峤] [SPARK-7294] ADD BETWEEN
baf839b [云峤] [SPARK-7294] ADD BETWEEN
d11d5b9 [云峤] [SPARK-7294] ADD BETWEEN
Diffstat (limited to 'sql')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala       28
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala  19
2 files changed, 41 insertions(+), 6 deletions(-)
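For context, this patch replaces show()'s plain space-padded layout with a bordered, right-aligned grid. A hypothetical REPL session sketching the new rendering (the sqlContext implicits import and the one-row frame are assumptions for illustration, not part of this patch):

import sqlContext.implicits._  // assumed: a SQLContext named sqlContext is in scope

val df = Seq((1, "1")).toDF("key", "value")
df.show()
// +---+-----+
// |key|value|
// +---+-----+
// |  1|    1|
// +---+-----+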
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index c421006c8f..cf344710ff 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -20,6 +20,7 @@ package org.apache.spark.sql
import java.io.CharArrayWriter
import java.sql.DriverManager
+
import scala.collection.JavaConversions._
import scala.language.implicitConversions
import scala.reflect.ClassTag
@@ -28,6 +29,7 @@ import scala.util.control.NonFatal
import com.fasterxml.jackson.core.JsonFactory
+import org.apache.commons.lang3.StringUtils
import org.apache.spark.annotation.{DeveloperApi, Experimental}
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.api.python.SerDeUtil
@@ -175,6 +177,7 @@ class DataFrame private[sql](
* @param numRows Number of rows to show
*/
private[sql] def showString(numRows: Int): String = {
+ val sb = new StringBuilder
val data = take(numRows)
val numCols = schema.fieldNames.length
@@ -194,12 +197,25 @@ class DataFrame private[sql](
}
}
- // Pad the cells
- rows.map { row =>
- row.zipWithIndex.map { case (cell, i) =>
- String.format(s"%-${colWidths(i)}s", cell)
- }.mkString(" ")
- }.mkString("\n")
+ // Create the separator line
+ val sep: String = colWidths.map("-" * _).addString(sb, "+", "+", "+\n").toString()
+
+ // column names
+ rows.head.zipWithIndex.map { case (cell, i) =>
+ StringUtils.leftPad(cell.toString, colWidths(i))
+ }.addString(sb, "|", "|", "|\n")
+
+ sb.append(sep)
+
+ // data
+ rows.tail.map {
+ _.zipWithIndex.map { case (cell, i) =>
+ StringUtils.leftPad(cell.toString, colWidths(i))
+ }.addString(sb, "|", "|", "|\n")
+ }
+
+ sb.append(sep)
+ sb.toString()
}
override def toString: String = {
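The heart of the change above: measure each column's maximum width, left-pad every cell with StringUtils.leftPad, and frame the header and data rows with "|" borders and "+---+"-style separator lines built via addString. Below is a self-contained sketch of the same technique; the object and method names are hypothetical, not Spark API:

import org.apache.commons.lang3.StringUtils

object ShowStringSketch {
  // Render a header row plus data rows as the bordered grid produced above.
  def render(rows: Seq[Seq[String]]): String = {
    // Width of each column = length of its widest cell (header included).
    val colWidths = rows.transpose.map(_.map(_.length).max)
    val sb = new StringBuilder
    // Separator line such as "+---+-----+".
    val sep = colWidths.map("-" * _).mkString("+", "+", "+\n")

    sb.append(sep)
    // Header: every cell left-padded to its column width, "|"-delimited.
    rows.head.zipWithIndex.map { case (cell, i) =>
      StringUtils.leftPad(cell, colWidths(i))
    }.addString(sb, "|", "|", "|\n")
    sb.append(sep)
    // Data rows, padded the same way; an empty tail prints nothing here.
    rows.tail.foreach { row =>
      row.zipWithIndex.map { case (cell, i) =>
        StringUtils.leftPad(cell, colWidths(i))
      }.addString(sb, "|", "|", "|\n")
    }
    sb.append(sep)
    sb.toString()
  }

  def main(args: Array[String]): Unit = {
    // Mirrors the SPARK-7319 unit test below: one header row, one data row.
    print(render(Seq(Seq("key", "value"), Seq("1", "1"))))
  }
}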
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index e286fef23c..ff31e15e2d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -598,6 +598,25 @@ class DataFrameSuite extends QueryTest {
testData.select($"*").show(1000)
}
+ test("SPARK-7319 showString") {
+ val expectedAnswer = """+---+-----+
+ ||key|value|
+ |+---+-----+
+ || 1| 1|
+ |+---+-----+
+ |""".stripMargin
+ assert(testData.select($"*").showString(1) === expectedAnswer)
+ }
+
+ test("SPARK-7327 show with empty dataFrame") {
+ val expectedAnswer = """+---+-----+
+ ||key|value|
+ |+---+-----+
+ |+---+-----+
+ |""".stripMargin
+ assert(testData.select($"*").filter($"key" < 0).showString(1) === expectedAnswer)
+ }
+
test("createDataFrame(RDD[Row], StructType) should convert UDTs (SPARK-6672)") {
val rowRDD = TestSQLContext.sparkContext.parallelize(Seq(Row(new ExamplePoint(1.0, 2.0))))
val schema = StructType(Array(StructField("point", new ExamplePointUDT(), false)))
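In the empty-DataFrame case exercised by SPARK-7327 above, take(numRows) returns no rows, so the rows sequence holds only the header and the data loop emits nothing; the grid degenerates to the column names between two adjacent separator lines. Checking that behavior against the hypothetical ShowStringSketch.render from the earlier sketch:

// Header only: rows.tail is empty, so just borders and column names print.
print(ShowStringSketch.render(Seq(Seq("key", "value"))))
// +---+-----+
// |key|value|
// +---+-----+
// +---+-----+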