path: root/sql
author     Jonathan Alter <jonalter@users.noreply.github.com>  2015-07-10 11:34:01 +0100
committer  Sean Owen <sowen@cloudera.com>  2015-07-10 11:34:01 +0100
commit     e14b545d2dcbc4587688b4c46718d3680b0a2f67 (patch)
tree       8ae5fe8258bce4fd1cb2d55c7031de8b1dcc3963 /sql
parent     d538919cc4fd3ab940d478c62dce1bae0270cfeb (diff)
download   spark-e14b545d2dcbc4587688b4c46718d3680b0a2f67.tar.gz
           spark-e14b545d2dcbc4587688b4c46718d3680b0a2f67.tar.bz2
           spark-e14b545d2dcbc4587688b4c46718d3680b0a2f67.zip
[SPARK-7977] [BUILD] Disallowing println
Author: Jonathan Alter <jonalter@users.noreply.github.com>

Closes #7093 from jonalter/SPARK-7977 and squashes the following commits:

ccd44cc [Jonathan Alter] Changed println to log in ThreadingSuite
7fcac3e [Jonathan Alter] Reverting to println in ThreadingSuite
10724b6 [Jonathan Alter] Changing some printlns to logs in tests
eeec1e7 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
0b1dcb4 [Jonathan Alter] More println cleanup
aedaf80 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
925fd98 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
0c16fa3 [Jonathan Alter] Replacing some printlns with logs
45c7e05 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
5c8e283 [Jonathan Alter] Allowing println in audit-release examples
5b50da1 [Jonathan Alter] Allowing printlns in example files
ca4b477 [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
83ab635 [Jonathan Alter] Fixing new printlns
54b131f [Jonathan Alter] Merge branch 'master' of github.com:apache/spark into SPARK-7977
1cd8a81 [Jonathan Alter] Removing some unnecessary comments and printlns
b837c3a [Jonathan Alter] Disallowing println
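Most of the replacements in the sql/ tree swap println for the Logging trait. A minimal sketch of that pattern, assuming the Spark 1.x org.apache.spark.Logging trait; the class and method names are illustrative, not taken from the patch:

import org.apache.spark.Logging

// Illustrative only: mix in the Logging trait and route diagnostics through
// logDebug/logWarning/logError instead of writing to stdout or stderr.
class QueryDebugger extends Logging {
  def reportCount(count: Long): Unit = logDebug(s"Results returned: $count")
  def reportFailure(e: Throwable): Unit = logError(s"Deepest Error: $e")
}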
Diffstat (limited to 'sql')
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala  |  2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala  |  2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala  |  2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala  |  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/Column.scala  |  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala  |  6
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala  |  16
-rw-r--r--  sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala  |  12
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala  |  5
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala  |  5
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/client/ClientWrapper.scala  |  2
-rw-r--r--  sql/hive/src/test/resources/regression-test-SPARK-8489/Main.scala  |  2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala  |  6
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala  |  2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala  |  2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala  |  6
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala  |  1
17 files changed, 50 insertions, 25 deletions
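Where console output is intentional (user-facing methods such as printSchema and show, the CLI driver, and test fixtures), the println calls stay but are wrapped in scalastyle suppression comments so the new build-wide rule still passes. A minimal sketch of that pattern (method and parameter names are illustrative):

def printTree(treeString: String): Unit = {
  // scalastyle:off println
  println(treeString)
  // scalastyle:on println
}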
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala
index 7f1b12cdd5..606fecbe06 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala
@@ -67,8 +67,10 @@ package object codegen {
outfile.write(generatedBytes)
outfile.close()
+ // scalastyle:off println
println(
s"javap -p -v -classpath ${dumpDirectory.getCanonicalPath} ${generatedClass.getName}".!!)
+ // scalastyle:on println
}
}
}
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
index 2f545bb432..b89e3382f0 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
@@ -154,7 +154,9 @@ abstract class QueryPlan[PlanType <: TreeNode[PlanType]] extends TreeNode[PlanTy
def schemaString: String = schema.treeString
/** Prints out the schema in the tree format */
+ // scalastyle:off println
def printSchema(): Unit = println(schemaString)
+ // scalastyle:on println
/**
* A prefix string used when printing the plan.
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala
index 07054166a5..71293475ca 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/package.scala
@@ -124,7 +124,9 @@ package object util {
val startTime = System.nanoTime()
val ret = f
val endTime = System.nanoTime()
+ // scalastyle:off println
println(s"${(endTime - startTime).toDouble / 1000000}ms")
+ // scalastyle:on println
ret
}
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala
index e0b8ff9178..b8097403ec 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala
@@ -250,7 +250,9 @@ case class StructType(fields: Array[StructField]) extends DataType with Seq[Stru
builder.toString()
}
+ // scalastyle:off println
def printTreeString(): Unit = println(treeString)
+ // scalastyle:on println
private[sql] def buildFormattedString(prefix: String, builder: StringBuilder): Unit = {
fields.foreach(field => field.buildFormattedString(prefix, builder))
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
index f201c8ea8a..1025026462 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
@@ -860,11 +860,13 @@ class Column(protected[sql] val expr: Expression) extends Logging {
* @since 1.3.0
*/
def explain(extended: Boolean): Unit = {
+ // scalastyle:off println
if (extended) {
println(expr)
} else {
println(expr.prettyString)
}
+ // scalastyle:on println
}
/**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index d7966651b1..830fba35bb 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -308,7 +308,9 @@ class DataFrame private[sql](
* @group basic
* @since 1.3.0
*/
+ // scalastyle:off println
def printSchema(): Unit = println(schema.treeString)
+ // scalastyle:on println
/**
* Prints the plans (logical and physical) to the console for debugging purposes.
@@ -319,7 +321,9 @@ class DataFrame private[sql](
ExplainCommand(
queryExecution.logical,
extended = extended).queryExecution.executedPlan.executeCollect().map {
+ // scalastyle:off println
r => println(r.getString(0))
+ // scalastyle:on println
}
}
@@ -392,7 +396,9 @@ class DataFrame private[sql](
* @group action
* @since 1.5.0
*/
+ // scalastyle:off println
def show(numRows: Int, truncate: Boolean): Unit = println(showString(numRows, truncate))
+ // scalastyle:on println
/**
* Returns a [[DataFrameNaFunctions]] for working with missing data.
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
index 2964edac1a..e6081cb05b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
@@ -24,7 +24,7 @@ import org.apache.spark.unsafe.types.UTF8String
import scala.collection.mutable.HashSet
-import org.apache.spark.{AccumulatorParam, Accumulator}
+import org.apache.spark.{AccumulatorParam, Accumulator, Logging}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.trees.TreeNodeRef
@@ -57,7 +57,7 @@ package object debug {
* Augments [[DataFrame]]s with debug methods.
*/
@DeveloperApi
- implicit class DebugQuery(query: DataFrame) {
+ implicit class DebugQuery(query: DataFrame) extends Logging {
def debug(): Unit = {
val plan = query.queryExecution.executedPlan
val visited = new collection.mutable.HashSet[TreeNodeRef]()
@@ -66,7 +66,7 @@ package object debug {
visited += new TreeNodeRef(s)
DebugNode(s)
}
- println(s"Results returned: ${debugPlan.execute().count()}")
+ logDebug(s"Results returned: ${debugPlan.execute().count()}")
debugPlan.foreach {
case d: DebugNode => d.dumpStats()
case _ =>
@@ -82,11 +82,11 @@ package object debug {
TypeCheck(s)
}
try {
- println(s"Results returned: ${debugPlan.execute().count()}")
+ logDebug(s"Results returned: ${debugPlan.execute().count()}")
} catch {
case e: Exception =>
def unwrap(e: Throwable): Throwable = if (e.getCause == null) e else unwrap(e.getCause)
- println(s"Deepest Error: ${unwrap(e)}")
+ logDebug(s"Deepest Error: ${unwrap(e)}")
}
}
}
@@ -119,11 +119,11 @@ package object debug {
val columnStats: Array[ColumnMetrics] = Array.fill(child.output.size)(new ColumnMetrics())
def dumpStats(): Unit = {
- println(s"== ${child.simpleString} ==")
- println(s"Tuples output: ${tupleCount.value}")
+ logDebug(s"== ${child.simpleString} ==")
+ logDebug(s"Tuples output: ${tupleCount.value}")
child.output.zip(columnStats).foreach { case(attr, metric) =>
val actualDataTypes = metric.elementTypes.value.mkString("{", ",", "}")
- println(s" ${attr.name} ${attr.dataType}: $actualDataTypes")
+ logDebug(s" ${attr.name} ${attr.dataType}: $actualDataTypes")
}
}
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
index 039cfa40d2..f66a17b209 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
@@ -40,7 +40,7 @@ import org.apache.spark.Logging
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.util.Utils
-private[hive] object SparkSQLCLIDriver {
+private[hive] object SparkSQLCLIDriver extends Logging {
private var prompt = "spark-sql"
private var continuedPrompt = "".padTo(prompt.length, ' ')
private var transport: TSocket = _
@@ -164,7 +164,7 @@ private[hive] object SparkSQLCLIDriver {
}
} catch {
case e: FileNotFoundException =>
- System.err.println(s"Could not open input file for reading. (${e.getMessage})")
+ logError(s"Could not open input file for reading. (${e.getMessage})")
System.exit(3)
}
@@ -180,14 +180,14 @@ private[hive] object SparkSQLCLIDriver {
val historyFile = historyDirectory + File.separator + ".hivehistory"
reader.setHistory(new History(new File(historyFile)))
} else {
- System.err.println("WARNING: Directory for Hive history file: " + historyDirectory +
+ logWarning("WARNING: Directory for Hive history file: " + historyDirectory +
" does not exist. History will not be available during this session.")
}
} catch {
case e: Exception =>
- System.err.println("WARNING: Encountered an error while trying to initialize Hive's " +
+ logWarning("WARNING: Encountered an error while trying to initialize Hive's " +
"history file. History will not be available during this session.")
- System.err.println(e.getMessage)
+ logWarning(e.getMessage)
}
val clientTransportTSocketField = classOf[CliSessionState].getDeclaredField("transport")
@@ -270,6 +270,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
val proc: CommandProcessor = CommandProcessorFactory.get(Array(tokens(0)), hconf)
if (proc != null) {
+ // scalastyle:off println
if (proc.isInstanceOf[Driver] || proc.isInstanceOf[SetProcessor] ||
proc.isInstanceOf[AddResourceProcessor]) {
val driver = new SparkSQLDriver
@@ -336,6 +337,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
}
ret = proc.run(cmd_1).getResponseCode
}
+ // scalastyle:on println
}
ret
}
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index bbc39b892b..4684d48aff 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -34,6 +34,7 @@ import org.apache.hadoop.hive.ql.parse.VariableSubstitution
import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.hadoop.hive.serde2.io.{DateWritable, TimestampWritable}
+import org.apache.spark.Logging
import org.apache.spark.SparkContext
import org.apache.spark.annotation.Experimental
import org.apache.spark.sql._
@@ -65,12 +66,12 @@ private[hive] class HiveQLDialect extends ParserDialect {
*
* @since 1.0.0
*/
-class HiveContext(sc: SparkContext) extends SQLContext(sc) {
+class HiveContext(sc: SparkContext) extends SQLContext(sc) with Logging {
self =>
import HiveContext._
- println("create HiveContext")
+ logDebug("create HiveContext")
/**
* When true, enables an experimental feature where metastore tables that use the parquet SerDe
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
index 2de7a99c12..7fc517b646 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
@@ -28,6 +28,7 @@ import org.apache.hadoop.hive.ql.parse._
import org.apache.hadoop.hive.ql.plan.PlanUtils
import org.apache.hadoop.hive.ql.session.SessionState
+import org.apache.spark.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.expressions._
@@ -73,7 +74,7 @@ private[hive] case class CreateTableAsSelect(
}
/** Provides a mapping from HiveQL statements to catalyst logical plans and expression trees. */
-private[hive] object HiveQl {
+private[hive] object HiveQl extends Logging {
protected val nativeCommands = Seq(
"TOK_ALTERDATABASE_OWNER",
"TOK_ALTERDATABASE_PROPERTIES",
@@ -186,7 +187,7 @@ private[hive] object HiveQl {
.map(ast => Option(ast).map(_.transform(rule)).orNull))
} catch {
case e: Exception =>
- println(dumpTree(n))
+ logError(dumpTree(n).toString)
throw e
}
}
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/ClientWrapper.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/ClientWrapper.scala
index cbd2bf6b5e..9d83ca6c11 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/ClientWrapper.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/ClientWrapper.scala
@@ -360,7 +360,9 @@ private[hive] class ClientWrapper(
case _ =>
if (state.out != null) {
+ // scalastyle:off println
state.out.println(tokens(0) + " " + cmd_1)
+ // scalastyle:on println
}
Seq(proc.run(cmd_1).getResponseCode.toString)
}
diff --git a/sql/hive/src/test/resources/regression-test-SPARK-8489/Main.scala b/sql/hive/src/test/resources/regression-test-SPARK-8489/Main.scala
index 0e428ba1d7..2590040f2e 100644
--- a/sql/hive/src/test/resources/regression-test-SPARK-8489/Main.scala
+++ b/sql/hive/src/test/resources/regression-test-SPARK-8489/Main.scala
@@ -30,6 +30,7 @@ import org.apache.spark.sql.hive.HiveContext
*/
object Main {
def main(args: Array[String]) {
+ // scalastyle:off println
println("Running regression test for SPARK-8489.")
val sc = new SparkContext("local", "testing")
val hc = new HiveContext(sc)
@@ -38,6 +39,7 @@ object Main {
val df = hc.createDataFrame(Seq(MyCoolClass("1", "2", "3")))
df.collect()
println("Regression test for SPARK-8489 success!")
+ // scalastyle:on println
sc.stop()
}
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
index e9bb326679..983c013bcf 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
@@ -17,13 +17,13 @@
package org.apache.spark.sql.hive
-import org.apache.spark.SparkFunSuite
+import org.apache.spark.{Logging, SparkFunSuite}
import org.apache.spark.sql.hive.test.TestHive
import org.apache.spark.sql.test.ExamplePointUDT
import org.apache.spark.sql.types.StructType
-class HiveMetastoreCatalogSuite extends SparkFunSuite {
+class HiveMetastoreCatalogSuite extends SparkFunSuite with Logging {
test("struct field should accept underscore in sub-column name") {
val metastr = "struct<a: int, b_1: string, c: string>"
@@ -41,7 +41,7 @@ class HiveMetastoreCatalogSuite extends SparkFunSuite {
test("duplicated metastore relations") {
import TestHive.implicits._
val df = TestHive.sql("SELECT * FROM src")
- println(df.queryExecution)
+ logInfo(df.queryExecution.toString)
df.as('a).join(df.as('b), $"a.key" === $"b.key")
}
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
index a38ed23b5c..917900e5f4 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala
@@ -90,8 +90,10 @@ class HiveSparkSubmitSuite
"SPARK_TESTING" -> "1",
"SPARK_HOME" -> sparkHome
).run(ProcessLogger(
+ // scalastyle:off println
(line: String) => { println(s"out> $line") },
(line: String) => { println(s"err> $line") }
+ // scalastyle:on println
))
try {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index aa5dbe2db6..508695919e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -86,8 +86,6 @@ class InsertIntoHiveTableSuite extends QueryTest with BeforeAndAfter {
val message = intercept[QueryExecutionException] {
sql("CREATE TABLE doubleCreateAndInsertTest (key int, value string)")
}.getMessage
-
- println("message!!!!" + message)
}
test("Double create does not fail when allowExisting = true") {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
index cc294bc3e8..d910af22c3 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
@@ -26,6 +26,7 @@ import org.scalatest.BeforeAndAfterAll
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.InvalidInputException
+import org.apache.spark.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.hive.client.{HiveTable, ManagedTable}
import org.apache.spark.sql.hive.test.TestHive
@@ -40,7 +41,8 @@ import org.apache.spark.util.Utils
/**
* Tests for persisting tables created though the data sources API into the metastore.
*/
-class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with BeforeAndAfterAll {
+class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with BeforeAndAfterAll
+ with Logging {
override val sqlContext = TestHive
var jsonFilePath: String = _
@@ -415,7 +417,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with BeforeA
|)
""".stripMargin)
- sql("DROP TABLE jsonTable").collect().foreach(println)
+ sql("DROP TABLE jsonTable").collect().foreach(i => logInfo(i.toString))
}
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
index eaaa88e170..1bde5922b5 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
@@ -315,7 +315,6 @@ class PairUDF extends GenericUDF {
)
override def evaluate(args: Array[DeferredObject]): AnyRef = {
- println("Type = %s".format(args(0).getClass.getName))
Integer.valueOf(args(0).get.asInstanceOf[TestPair].entry._2)
}