Diffstat (limited to 'sql/core')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala        9
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala  6
2 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala
index fcf21ca741..cb88deab35 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala
@@ -65,10 +65,11 @@ final class DataFrameStatFunctions private[sql](df: DataFrame) {
 
   /**
    * Computes a pair-wise frequency table of the given columns. Also known as a contingency table.
-   * The number of distinct values for each column should be less than 1e4. The first
-   * column of each row will be the distinct values of `col1` and the column names will be the
-   * distinct values of `col2`. The name of the first column will be `$col1_$col2`. Counts will be
-   * returned as `Long`s. Pairs that have no occurrences will have `null` as their counts.
+   * The number of distinct values for each column should be less than 1e4. At most 1e6 non-zero
+   * pair frequencies will be returned.
+   * The first column of each row will be the distinct values of `col1` and the column names will
+   * be the distinct values of `col2`. The name of the first column will be `$col1_$col2`. Counts
+   * will be returned as `Long`s. Pairs that have no occurrences will have `null` as their counts.
    *
    * @param col1 The name of the first column. Distinct items will make the first item of
    *             each row.
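
A minimal usage sketch of the crosstab API documented above (the column names and sample data are made up for illustration, and the SparkSession entry point is an assumption that postdates this patch, which targeted SQLContext-era DataFrames):

// Hypothetical example: pair-wise frequency table of `user` vs `plan`.
// The result has one row per distinct `user` value and one column per
// distinct `plan` value, with the first column named "user_plan".
import org.apache.spark.sql.SparkSession

object CrosstabSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("crosstab-sketch").getOrCreate()
    import spark.implicits._

    val df = Seq(
      ("alice", "free"), ("alice", "pro"), ("bob", "pro"), ("carol", "free")
    ).toDF("user", "plan")

    df.stat.crosstab("user", "plan").show()

    spark.stop()
  }
}
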
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala
index b50f606d9c..386ac969f1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala
@@ -102,9 +102,9 @@ private[sql] object StatFunctions extends Logging {
   /** Generate a table of frequencies for the elements of two columns. */
   private[sql] def crossTabulate(df: DataFrame, col1: String, col2: String): DataFrame = {
     val tableName = s"${col1}_$col2"
-    val counts = df.groupBy(col1, col2).agg(col(col1), col(col2), count("*")).take(1e8.toInt)
-    if (counts.length == 1e8.toInt) {
-      logWarning("The maximum limit of 1e8 pairs have been collected, which may not be all of " +
+    val counts = df.groupBy(col1, col2).agg(col(col1), col(col2), count("*")).take(1e6.toInt)
+    if (counts.length == 1e6.toInt) {
+      logWarning("The maximum limit of 1e6 pairs have been collected, which may not be all of " +
         "the pairs. Please try reducing the amount of distinct items in your columns.")
     }
     // get the distinct values of column 2, so that we can make them the column names
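
For illustration, a standalone re-sketch of the counting step this hunk changes; the object and method names are invented, groupBy(...).count() stands in for the agg call above, and println replaces logWarning since the sketch has no Logging mixin:

import org.apache.spark.sql.{DataFrame, Row}

object PairCountSketch {
  // Count each (col1, col2) pair and collect at most 1e6 of them to the
  // driver, warning when the cap is hit (some pairs may then be missing).
  def countPairs(df: DataFrame, col1: String, col2: String): Array[Row] = {
    val maxPairs = 1e6.toInt
    val counts = df.groupBy(col1, col2).count().take(maxPairs)
    if (counts.length == maxPairs) {
      println(s"The maximum limit of $maxPairs pairs have been collected, which may not be all " +
        "of the pairs. Please try reducing the amount of distinct items in your columns.")
    }
    counts
  }
}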