author     hyukjinkwon <gurwls223@gmail.com>   2017-04-12 12:38:48 +0100
committer  Sean Owen <sowen@cloudera.com>      2017-04-12 12:38:48 +0100
commit     ceaf77ae43a14e993ac6d1ff34b50256eacd6abb (patch)
tree       34bdb8c1ebbf4df008dc95a6c1234f203fa1dc49 /sql/core/src/test/scala/org/apache
parent     2e1fd46e12bf948490ece2caa73d227b6a924a14 (diff)
download   spark-ceaf77ae43a14e993ac6d1ff34b50256eacd6abb.tar.gz
           spark-ceaf77ae43a14e993ac6d1ff34b50256eacd6abb.tar.bz2
           spark-ceaf77ae43a14e993ac6d1ff34b50256eacd6abb.zip
[SPARK-18692][BUILD][DOCS] Test Java 8 unidoc build on Jenkins
## What changes were proposed in this pull request?

This PR proposes to run Spark unidoc as part of the tests, exercising the Javadoc 8 build, since Javadoc 8 is easily re-broken.

There are several problems with it:

- It adds a little extra time to the test run. In my case, it took 1.5 mins more (`Elapsed :[94.8746569157]`). How this was measured is described in "How was this patch tested?".

- > One problem that I noticed was that Unidoc appeared to be processing test sources: if we can find a way to exclude those from being processed in the first place then that might significantly speed things up.

  (see joshrosen's [comment](https://issues.apache.org/jira/browse/SPARK-18692?focusedCommentId=15947627&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-15947627))

To make this automated build pass, this PR also fixes the existing Javadoc breaks, including the ones introduced by test code as described above. These fixes are similar to instances fixed previously; please refer to https://github.com/apache/spark/pull/15999 and https://github.com/apache/spark/pull/16013.

Note that this only fixes **errors**, not **warnings**. Please see my observation https://github.com/apache/spark/pull/17389#issuecomment-288438704 on spurious errors caused by warnings.

## How was this patch tested?

Manually via `jekyll build` for the documentation build, and by running `./dev/run-tests`. The elapsed time was measured by manually adding `time.time()` as below:

```diff
 profiles_and_goals = build_profiles + sbt_goals

 print("[info] Building Spark unidoc (w/Hive 1.2.1) using SBT with these arguments: ",
       " ".join(profiles_and_goals))

+import time
+st = time.time()
 exec_sbt(profiles_and_goals)
+print("Elapsed :[%s]" % str(time.time() - st))
```

produces

```
...
========================================================================
Building Unidoc API Documentation
========================================================================
...
[info] Main Java API documentation successful.
...
Elapsed :[94.8746569157]
...
```

Author: hyukjinkwon <gurwls223@gmail.com>

Closes #17477 from HyukjinKwon/SPARK-18692.
Diffstat (limited to 'sql/core/src/test/scala/org/apache')
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DatasetSerializerRegistratorSuite.scala  |  4
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala    |  4
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala              |  4
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala      | 12
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala                  | 18
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala                |  2
6 files changed, 26 insertions, 18 deletions
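Every hunk below applies the same edit: Scaladoc wiki links (`[[Foo]]`) in test sources are translated by the genjavadoc-based unidoc build into Javadoc `{@link}` tags, and Javadoc 8 rejects those as errors when the referenced type cannot be resolved in the generated Java sources; downgrading them to plain code spans leaves nothing to resolve. A before/after illustration, taken from the first hunk:

```scala
// Before: [[Serializer]] becomes a Javadoc {@link}, which Javadoc 8 rejects
// as an error because the Kryo Serializer type is not resolvable there.
/** A [[Serializer]] that takes a [[KryoData]] and serializes it as KryoData(0). */

// After: a plain `code` span renders as monospace text and produces no link.
// [[KryoData]] is kept, since it lives in the same file and does resolve.
/**
 * A `Serializer` that takes a [[KryoData]] and serializes it as KryoData(0).
 */
```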
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSerializerRegistratorSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSerializerRegistratorSuite.scala
index 0f3d0cefe3..92c5656f65 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSerializerRegistratorSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSerializerRegistratorSuite.scala
@@ -56,7 +56,9 @@ object TestRegistrator {
def apply(): TestRegistrator = new TestRegistrator()
}
-/** A [[Serializer]] that takes a [[KryoData]] and serializes it as KryoData(0). */
+/**
+ * A `Serializer` that takes a [[KryoData]] and serializes it as KryoData(0).
+ */
class ZeroKryoDataSerializer extends Serializer[KryoData] {
override def write(kryo: Kryo, output: Output, t: KryoData): Unit = {
output.writeInt(0)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala
index 26967782f7..2108b118bf 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala
@@ -44,8 +44,8 @@ abstract class FileStreamSourceTest
import testImplicits._
/**
- * A subclass [[AddData]] for adding data to files. This is meant to use the
- * [[FileStreamSource]] actually being used in the execution.
+ * A subclass `AddData` for adding data to files. This is meant to use the
+ * `FileStreamSource` actually being used in the execution.
*/
abstract class AddFileData extends AddData {
override def addData(query: Option[StreamExecution]): (Source, Offset) = {
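For context on the `AddFileData` doc fixed above, here is a hedged sketch of a concrete subclass, assuming (as in this era of the suite) that the abstract `addData(query)` shown here funnels into a per-source hook `addData(source: FileStreamSource)`; the `stringToFile` helper and the `src`/`tmp` directories are illustrative, not necessarily the suite's exact members:

```scala
import java.io.File
import org.apache.spark.sql.execution.streaming.FileStreamSource
import org.apache.spark.util.Utils

// Illustrative concrete AddFileData: write the content to a temp file, then
// rename it into the watched directory so the FileStreamSource never
// observes a half-written file.
case class AddTextFileDataSketch(content: String, src: File, tmp: File)
  extends AddFileData {

  override protected def addData(source: FileStreamSource): Unit = {
    val tempFile = Utils.tempFileWith(new File(tmp, "text"))
    val finalFile = new File(src, tempFile.getName)
    src.mkdirs()
    require(stringToFile(tempFile, content).renameTo(finalFile))
  }
}
```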
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
index 5ab9dc2bc7..13fe51a557 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
@@ -569,7 +569,7 @@ class ThrowingIOExceptionLikeHadoop12074 extends FakeSource {
object ThrowingIOExceptionLikeHadoop12074 {
/**
- * A latch to allow the user to wait until [[ThrowingIOExceptionLikeHadoop12074.createSource]] is
+ * A latch to allow the user to wait until `ThrowingIOExceptionLikeHadoop12074.createSource` is
* called.
*/
@volatile var createSourceLatch: CountDownLatch = null
@@ -600,7 +600,7 @@ class ThrowingInterruptedIOException extends FakeSource {
object ThrowingInterruptedIOException {
/**
- * A latch to allow the user to wait until [[ThrowingInterruptedIOException.createSource]] is
+ * A latch to allow the user to wait until `ThrowingInterruptedIOException.createSource` is
* called.
*/
@volatile var createSourceLatch: CountDownLatch = null
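The latches documented in both hunks above follow the standard `java.util.concurrent.CountDownLatch` handshake: `createSource` counts down, and the test awaits. A self-contained, hedged sketch of the pattern (`LatchSketch` and its members stand in for the suite's actual wiring):

```scala
import java.io.IOException
import java.util.concurrent.CountDownLatch

object LatchSketch {
  @volatile var createSourceLatch: CountDownLatch = new CountDownLatch(1)

  // Stands in for createSource: signal first, then fail like HADOOP-12074.
  def createSource(): Unit = {
    createSourceLatch.countDown()
    throw new IOException("Failed on local exception")
  }

  def main(args: Array[String]): Unit = {
    val t = new Thread(new Runnable {
      override def run(): Unit =
        try createSource() catch { case _: IOException => () }
    })
    t.start()
    createSourceLatch.await() // the "user" blocks until createSource ran
    println("createSource was called")
  }
}
```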
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala
index 2ebbfcd22b..b69536ed37 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala
@@ -642,8 +642,10 @@ class StreamingQuerySuite extends StreamTest with BeforeAndAfter with Logging wi
*
* @param expectedBehavior Expected behavior (not blocked, blocked, or exception thrown)
* @param timeoutMs Timeout in milliseconds
- * When timeoutMs <= 0, awaitTermination() is tested (i.e. w/o timeout)
- * When timeoutMs > 0, awaitTermination(timeoutMs) is tested
+ * When timeoutMs is less than or equal to 0, awaitTermination() is
+ * tested (i.e. w/o timeout)
+ * When timeoutMs is greater than 0, awaitTermination(timeoutMs) is
+ * tested
* @param expectedReturnValue Expected return value when awaitTermination(timeoutMs) is used
*/
case class TestAwaitTermination(
@@ -667,8 +669,10 @@ class StreamingQuerySuite extends StreamTest with BeforeAndAfter with Logging wi
*
* @param expectedBehavior Expected behavior (not blocked, blocked, or exception thrown)
* @param timeoutMs Timeout in milliseconds
- * When timeoutMs <= 0, awaitTermination() is tested (i.e. w/o timeout)
- * When timeoutMs > 0, awaitTermination(timeoutMs) is tested
+ * When timeoutMs is less than or equal to 0, awaitTermination() is
+ * tested (i.e. w/o timeout)
+ * When timeoutMs is greater than 0, awaitTermination(timeoutMs) is
+ * tested
* @param expectedReturnValue Expected return value when awaitTermination(timeoutMs) is used
*/
def assertOnQueryCondition(
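The `timeoutMs` convention documented in both hunks above maps onto the two real `StreamingQuery.awaitTermination` overloads (`awaitTermination(): Unit` and `awaitTermination(timeoutMs: Long): Boolean`). A hedged sketch of the dispatch, as a method that could live inside the suite; the helper name is hypothetical and the real helpers also check blocking behavior and thrown exceptions:

```scala
import org.apache.spark.sql.streaming.StreamingQuery

// Illustrative dispatch only, mirroring the Scaladoc above.
def checkAwaitTermination(
    query: StreamingQuery,
    timeoutMs: Int,
    expectedReturnValue: Boolean): Unit = {
  if (timeoutMs <= 0) {
    // No-timeout variant: blocks until the query terminates.
    query.awaitTermination()
  } else {
    // Timed variant: returns true iff the query terminated within timeoutMs.
    val returned = query.awaitTermination(timeoutMs)
    assert(returned == expectedReturnValue)
  }
}
```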
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala
index cab219216d..6a4cc95d36 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala
@@ -41,11 +41,11 @@ import org.apache.spark.util.{UninterruptibleThread, Utils}
/**
* Helper trait that should be extended by all SQL test suites.
*
- * This allows subclasses to plugin a custom [[SQLContext]]. It comes with test data
+ * This allows subclasses to plugin a custom `SQLContext`. It comes with test data
* prepared in advance as well as all implicit conversions used extensively by dataframes.
- * To use implicit methods, import `testImplicits._` instead of through the [[SQLContext]].
+ * To use implicit methods, import `testImplicits._` instead of through the `SQLContext`.
*
- * Subclasses should *not* create [[SQLContext]]s in the test suite constructor, which is
+ * Subclasses should *not* create `SQLContext`s in the test suite constructor, which is
* prone to leaving multiple overlapping [[org.apache.spark.SparkContext]]s in the same JVM.
*/
private[sql] trait SQLTestUtils
@@ -65,7 +65,7 @@ private[sql] trait SQLTestUtils
* A helper object for importing SQL implicits.
*
* Note that the alternative of importing `spark.implicits._` is not possible here.
- * This is because we create the [[SQLContext]] immediately before the first test is run,
+ * This is because we create the `SQLContext` immediately before the first test is run,
* but the implicits import is needed in the constructor.
*/
protected object testImplicits extends SQLImplicits {
@@ -73,7 +73,7 @@ private[sql] trait SQLTestUtils
}
/**
- * Materialize the test data immediately after the [[SQLContext]] is set up.
+ * Materialize the test data immediately after the `SQLContext` is set up.
* This is necessary if the data is accessed by name but not through direct reference.
*/
protected def setupTestData(): Unit = {
@@ -250,8 +250,8 @@ private[sql] trait SQLTestUtils
}
/**
- * Turn a logical plan into a [[DataFrame]]. This should be removed once we have an easier
- * way to construct [[DataFrame]] directly out of local data without relying on implicits.
+ * Turn a logical plan into a `DataFrame`. This should be removed once we have an easier
+ * way to construct `DataFrame` directly out of local data without relying on implicits.
*/
protected implicit def logicalPlanToSparkQuery(plan: LogicalPlan): DataFrame = {
Dataset.ofRows(spark, plan)
@@ -271,7 +271,9 @@ private[sql] trait SQLTestUtils
}
}
- /** Run a test on a separate [[UninterruptibleThread]]. */
+ /**
+ * Run a test on a separate `UninterruptibleThread`.
+ */
protected def testWithUninterruptibleThread(name: String, quietly: Boolean = false)
(body: => Unit): Unit = {
val timeoutMillis = 10000
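As a usage sketch of the `testImplicits` object documented above: the suite name is hypothetical, and the `SharedSQLContext` mixin is how Spark's own SQL suites typically obtain a `SQLTestUtils` setup. Importing `testImplicits._` from the trait, rather than `spark.implicits._`, works even though the `SQLContext` is created lazily just before the first test runs:

```scala
import org.apache.spark.sql.{QueryTest, Row}
import org.apache.spark.sql.test.SharedSQLContext

// Hypothetical suite: toDF and the $ column interpolator both come from
// the trait's testImplicits, so they are usable in the constructor.
class ImplicitsSketchSuite extends QueryTest with SharedSQLContext {
  import testImplicits._

  test("toDF and $ come from testImplicits") {
    val df = Seq((1, "a"), (2, "b")).toDF("id", "name")
    checkAnswer(df.filter($"id" > 1), Row(2, "b"))
  }
}
```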
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala
index b01977a238..959edf9a49 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala
@@ -22,7 +22,7 @@ import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.internal.{SessionState, SessionStateBuilder, SQLConf, WithTestConf}
/**
- * A special [[SparkSession]] prepared for testing.
+ * A special `SparkSession` prepared for testing.
*/
private[sql] class TestSparkSession(sc: SparkContext) extends SparkSession(sc) { self =>
def this(sparkConf: SparkConf) {
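Finally, a REPL-style usage sketch of the session above, illustrative only: `TestSparkSession` is `private[sql]`, so real callers live inside `org.apache.spark.sql`, and the `SparkConf` shown is a bare default:

```scala
import org.apache.spark.SparkConf
import org.apache.spark.sql.test.TestSparkSession

// Construct an isolated testing session from a SparkConf, smoke-test it,
// and tear it down afterwards.
val spark = new TestSparkSession(new SparkConf())
try {
  assert(spark.range(10).count() == 10)
} finally {
  spark.stop()
}
```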