diff options
author | Sean Owen <sowen@cloudera.com> | 2014-03-25 10:21:25 -0700 |
---|---|---|
committer | Reynold Xin <rxin@apache.org> | 2014-03-25 10:21:25 -0700 |
commit | 71d4ed271bcbddb154643bd44297ed77190e75cf (patch) | |
tree | a6618c610ce4d9001ca8e5b08d4811e3105ecfc3 /streaming/src/main/scala | |
parent | 134ace7fea7f772f5bafa9d11b8677cb7d311266 (diff) | |
download | spark-71d4ed271bcbddb154643bd44297ed77190e75cf.tar.gz spark-71d4ed271bcbddb154643bd44297ed77190e75cf.tar.bz2 spark-71d4ed271bcbddb154643bd44297ed77190e75cf.zip |
SPARK-1316. Remove use of Commons IO
(This follows from a side point on SPARK-1133, in discussion of the PR: https://github.com/apache/spark/pull/164 )
Commons IO is barely used in the project, and can easily be replaced with equivalent calls to Guava or the existing Spark `Utils.scala` class.
Removing a dependency feels good, and this one in particular can get a little problematic since Hadoop uses it too.
Author: Sean Owen <sowen@cloudera.com>
Closes #226 from srowen/SPARK-1316 and squashes the following commits:
21efef3 [Sean Owen] Remove use of Commons IO
Diffstat (limited to 'streaming/src/main/scala')
-rw-r--r-- | streaming/src/main/scala/org/apache/spark/streaming/util/MasterFailureTest.scala | 6 |
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/util/MasterFailureTest.scala b/streaming/src/main/scala/org/apache/spark/streaming/util/MasterFailureTest.scala index 2bb616cfb8..c48a38590e 100644 --- a/streaming/src/main/scala/org/apache/spark/streaming/util/MasterFailureTest.scala +++ b/streaming/src/main/scala/org/apache/spark/streaming/util/MasterFailureTest.scala @@ -28,12 +28,12 @@ import scala.collection.mutable.{SynchronizedBuffer, ArrayBuffer} import scala.reflect.ClassTag import java.io.{File, ObjectInputStream, IOException} +import java.nio.charset.Charset import java.util.UUID import com.google.common.io.Files -import org.apache.commons.io.FileUtils -import org.apache.hadoop.fs.{FileUtil, FileSystem, Path} +import org.apache.hadoop.fs.Path import org.apache.hadoop.conf.Configuration @@ -389,7 +389,7 @@ class FileGeneratingThread(input: Seq[String], testDir: Path, interval: Long) val localFile = new File(localTestDir, (i + 1).toString) val hadoopFile = new Path(testDir, (i + 1).toString) val tempHadoopFile = new Path(testDir, ".tmp_" + (i + 1).toString) - FileUtils.writeStringToFile(localFile, input(i).toString + "\n") + Files.write(input(i) + "\n", localFile, Charset.forName("UTF-8")) var tries = 0 var done = false while (!done && tries < maxTries) { |