about summary refs log tree commit diff
path: root/sql
diff options
context:
space:
mode:
authorReynold Xin <rxin@databricks.com>2015-02-16 10:09:55 -0800
committerReynold Xin <rxin@databricks.com>2015-02-16 10:09:55 -0800
commit9baac56ccd57d3890a9b6439d4e13bbe9381822b (patch)
treea62d72bf875ccded9a8ffc31aec5d90ac883f841 /sql
parent5c78be7a515fc2fc92cda0517318e7b5d85762f4 (diff)
downloadspark-9baac56ccd57d3890a9b6439d4e13bbe9381822b.tar.gz
spark-9baac56ccd57d3890a9b6439d4e13bbe9381822b.tar.bz2
spark-9baac56ccd57d3890a9b6439d4e13bbe9381822b.zip
Minor fixes for commit https://github.com/apache/spark/pull/4592.
Diffstat (limited to 'sql')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameImpl.scala | 6
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala | 9
2 files changed, 7 insertions, 8 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameImpl.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameImpl.scala
index 9eb0c13140..500e3c90fd 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameImpl.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameImpl.scala
@@ -83,17 +83,17 @@ private[sql] class DataFrameImpl protected[sql](
protected[sql] def resolve(colName: String): NamedExpression = {
queryExecution.analyzed.resolve(colName, sqlContext.analyzer.resolver).getOrElse {
- throw new RuntimeException(
+ throw new AnalysisException(
s"""Cannot resolve column name "$colName" among (${schema.fieldNames.mkString(", ")})""")
}
}
- protected[sql] def numericColumns(): Seq[Expression] = {
+ protected[sql] def numericColumns: Seq[Expression] = {
schema.fields.filter(_.dataType.isInstanceOf[NumericType]).map { n =>
queryExecution.analyzed.resolve(n.name, sqlContext.analyzer.resolver).get
}
}
-
+
override def toDF(colNames: String*): DataFrame = {
require(schema.size == colNames.size,
"The number of columns doesn't match.\n" +
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala b/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
index a5a677b688..2ecf086de9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
@@ -17,8 +17,8 @@
package org.apache.spark.sql
-import scala.language.implicitConversions
import scala.collection.JavaConversions._
+import scala.language.implicitConversions
import org.apache.spark.sql.catalyst.analysis.Star
import org.apache.spark.sql.catalyst.expressions._
@@ -26,7 +26,6 @@ import org.apache.spark.sql.catalyst.plans.logical.Aggregate
import org.apache.spark.sql.types.NumericType
-
/**
* A set of methods for aggregations on a [[DataFrame]], created by [[DataFrame.groupBy]].
*/
@@ -48,13 +47,13 @@ class GroupedData protected[sql](df: DataFrameImpl, groupingExprs: Seq[Expressio
// No columns specified. Use all numeric columns.
df.numericColumns
} else {
- // Make sure all specified columns are numeric
+ // Make sure all specified columns are numeric.
colNames.map { colName =>
val namedExpr = df.resolve(colName)
if (!namedExpr.dataType.isInstanceOf[NumericType]) {
throw new AnalysisException(
s""""$colName" is not a numeric column. """ +
- "Aggregation function can only be performed on a numeric column.")
+ "Aggregation function can only be applied on a numeric column.")
}
namedExpr
}
@@ -64,7 +63,7 @@ class GroupedData protected[sql](df: DataFrameImpl, groupingExprs: Seq[Expressio
Alias(a, a.toString)()
}
}
-
+
private[this] def strToExpr(expr: String): (Expression => Expression) = {
expr.toLowerCase match {
case "avg" | "average" | "mean" => Average