author     witgo <witgo@qq.com>                    2014-07-15 10:46:17 -0700
committer  Patrick Wendell <pwendell@gmail.com>    2014-07-15 10:46:17 -0700
commit     9dd635eb5df52835b3b7f4f2b9c789da9e813c71 (patch)
tree       32b0a1f4ff11e9a96d5fc2744827c764e239b017
parent     cb09e93c1d7ef9c8f0a1abe4e659783c74993a4e (diff)
SPARK-2480: Resolve sbt warnings "NOTE: SPARK_YARN is deprecated, please use -Pyarn flag"
Author: witgo <witgo@qq.com>

Closes #1404 from witgo/run-tests and squashes the following commits:

f703aee [witgo] fix Note: implicit method fromPairDStream is not applicable here because it comes after the application point and it lacks an explicit result type
2944f51 [witgo] Remove "NOTE: SPARK_YARN is deprecated, please use -Pyarn flag"
ef59c70 [witgo] fix Note: implicit method fromPairDStream is not applicable here because it comes after the application point and it lacks an explicit result type
6cefee5 [witgo] Remove "NOTE: SPARK_YARN is deprecated, please use -Pyarn flag"
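For context, a before/after sketch of the build invocation this commit standardizes on. The environment variables and the profile string are taken verbatim from the diff below; the `assembly` target is only an illustrative example of a build command:

    # Old style: build options selected through environment variables
    SPARK_HADOOP_VERSION=2.3.0 SPARK_YARN=true SPARK_HIVE=true sbt/sbt assembly

    # New style: the same options expressed as Maven profiles and properties
    sbt/sbt -Pyarn -Phadoop-2.3 -Dhadoop.version=2.3.0 -Phive assembly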
-rwxr-xr-x  dev/run-tests                               7
-rwxr-xr-x  dev/scalastyle                              6
-rw-r--r--  docs/hadoop-third-party-distributions.md    4
-rw-r--r--  docs/sql-programming-guide.md               2
4 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/dev/run-tests b/dev/run-tests
index edd17b53b3..51e4def0f8 100755
--- a/dev/run-tests
+++ b/dev/run-tests
@@ -21,8 +21,7 @@
FWDIR="$(cd `dirname $0`/..; pwd)"
cd $FWDIR
-export SPARK_HADOOP_VERSION=2.3.0
-export SPARK_YARN=true
+export SBT_MAVEN_PROFILES="-Pyarn -Phadoop-2.3 -Dhadoop.version=2.3.0"
# Remove work directory
rm -rf ./work
@@ -66,8 +65,8 @@ echo "========================================================================="
# (either resolution or compilation) prompts the user for input either q, r,
# etc to quit or retry. This echo is there to make it not block.
if [ -n "$_RUN_SQL_TESTS" ]; then
- echo -e "q\n" | SPARK_HIVE=true sbt/sbt clean package assembly/assembly test | \
- grep -v -e "info.*Resolving" -e "warn.*Merging" -e "info.*Including"
+ echo -e "q\n" | SBT_MAVEN_PROFILES="$SBT_MAVEN_PROFILES -Phive" sbt/sbt clean package \
+ assembly/assembly test | grep -v -e "info.*Resolving" -e "warn.*Merging" -e "info.*Including"
else
echo -e "q\n" | sbt/sbt clean package assembly/assembly test | \
grep -v -e "info.*Resolving" -e "warn.*Merging" -e "info.*Including"
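The hunk above only sets SBT_MAVEN_PROFILES; how sbt/sbt consumes it is outside this diff. As a hedged illustration, a wrapper script could forward such a variable as follows (hypothetical sketch, not the actual sbt/sbt launcher; it assumes the underlying build accepts the -P/-D flags as plain arguments):

    #!/usr/bin/env bash
    # Hypothetical wrapper: prepend any profiles/properties from the
    # environment to the arguments passed on to the real sbt launcher.
    # The variable is intentionally left unquoted so each flag is passed
    # as a separate argument.
    SBT_MAVEN_PROFILES=${SBT_MAVEN_PROFILES:-""}
    exec sbt ${SBT_MAVEN_PROFILES} "$@"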
diff --git a/dev/scalastyle b/dev/scalastyle
index 0e8fd5cc8d..a02d06912f 100755
--- a/dev/scalastyle
+++ b/dev/scalastyle
@@ -17,12 +17,12 @@
# limitations under the License.
#
-echo -e "q\n" | SPARK_HIVE=true sbt/sbt scalastyle > scalastyle.txt
+echo -e "q\n" | sbt/sbt -Phive scalastyle > scalastyle.txt
# Check style with YARN alpha built too
-echo -e "q\n" | SPARK_HADOOP_VERSION=0.23.9 SPARK_YARN=true sbt/sbt yarn-alpha/scalastyle \
+echo -e "q\n" | sbt/sbt -Pyarn -Phadoop-0.23 -Dhadoop.version=0.23.9 yarn-alpha/scalastyle \
>> scalastyle.txt
# Check style with YARN built too
-echo -e "q\n" | SPARK_HADOOP_VERSION=2.2.0 SPARK_YARN=true sbt/sbt yarn/scalastyle \
+echo -e "q\n" | sbt/sbt -Pyarn -Phadoop-2.2 -Dhadoop.version=2.2.0 yarn/scalastyle \
>> scalastyle.txt
ERRORS=$(cat scalastyle.txt | grep -e "\<error\>")
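For completeness, the ERRORS variable collected above is typically consumed by a tail like the following (illustrative only, not part of this diff):

    # Fail the check if any "error" lines were collected from scalastyle.txt
    if test ! -z "$ERRORS"; then
        echo -e "Scalastyle checks failed at following occurrences:\n$ERRORS"
        exit 1
    else
        echo -e "Scalastyle checks passed."
    fi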
diff --git a/docs/hadoop-third-party-distributions.md b/docs/hadoop-third-party-distributions.md
index 32403bc695..ab1023b8f1 100644
--- a/docs/hadoop-third-party-distributions.md
+++ b/docs/hadoop-third-party-distributions.md
@@ -48,9 +48,9 @@ the _exact_ Hadoop version you are running to avoid any compatibility errors.
</tr>
</table>
-In SBT, the equivalent can be achieved by setting the SPARK_HADOOP_VERSION flag:
+In SBT, the equivalent can be achieved by setting the `hadoop.version` property:
- SPARK_HADOOP_VERSION=1.0.4 sbt/sbt assembly
+ sbt/sbt -Dhadoop.version=1.0.4 assembly
# Linking Applications to the Hadoop Version
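After this change the SBT and Maven builds pin the Hadoop version the same way. A short hedged comparison (the SBT line comes from the hunk above; the Maven goal list is illustrative, with the exact profiles left to the table earlier in the doc):

    # SBT, as documented above
    sbt/sbt -Dhadoop.version=1.0.4 assembly

    # Roughly equivalent Maven invocation (goals illustrative; add the
    # distribution-specific profiles from the table above as needed)
    mvn -Dhadoop.version=1.0.4 -DskipTests clean package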
diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index 522c83884e..38728534a4 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -474,7 +474,7 @@ anotherPeople = sqlContext.jsonRDD(anotherPeopleRDD)
Spark SQL also supports reading and writing data stored in [Apache Hive](http://hive.apache.org/).
However, since Hive has a large number of dependencies, it is not included in the default Spark assembly.
-In order to use Hive you must first run '`SPARK_HIVE=true sbt/sbt assembly/assembly`' (or use `-Phive` for maven).
+In order to use Hive you must first run '`sbt/sbt -Phive assembly/assembly`' (or use `-Phive` for maven).
This command builds a new assembly jar that includes Hive. Note that this Hive assembly jar must also be present
on all of the worker nodes, as they will need access to the Hive serialization and deserialization libraries
(SerDes) in order to access data stored in Hive.
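To summarize the Hive-enabled build described above (the SBT command comes from the hunk; the Maven goal list is an assumption based on the "-Phive for maven" note, with only the profile flag taken from the doc):

    # SBT
    sbt/sbt -Phive assembly/assembly

    # Maven (illustrative goals; -Phive is the documented profile)
    mvn -Phive -DskipTests clean package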