author     Sean Owen <sowen@cloudera.com>    2016-03-16 09:36:34 +0000
committer  Sean Owen <sowen@cloudera.com>    2016-03-16 09:36:34 +0000
commit     3b461d9ecd633c4fd659998b99e700d76f58d18a (patch)
tree       09e9923fc17bada794d01bc365f130039972a8b7 /sql/core/src/main/scala/org
parent     05ab2948ab357fc07222bb3505df80b1886f7310 (diff)
[SPARK-13823][SPARK-13397][SPARK-13395][CORE] More warnings, StandardCharset follow up
## What changes were proposed in this pull request?

Follow up to https://github.com/apache/spark/pull/11657

- Also update `String.getBytes("UTF-8")` to use `StandardCharsets.UTF_8`
- And fix one last new Coverity warning that turned up (use of unguarded `wait()` replaced by simpler/more robust `java.util.concurrent` classes in tests)
- And while we're here cleaning up Coverity warnings, just fix about 15 more build warnings

## How was this patch tested?

Jenkins tests

Author: Sean Owen <sowen@cloudera.com>

Closes #11725 from srowen/SPARK-13823.2.
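For context, the reason `StandardCharsets.UTF_8` is preferred over a charset name string is that the name-based overload resolves the charset at runtime (and, on the Java side, declares a checked `UnsupportedEncodingException`), while the `Charset` overload cannot fail. A minimal before/after sketch, illustrative only and not code from this patch:

```scala
import java.nio.charset.StandardCharsets

object CharsetExample {
  val s = "payload"

  // Before: the charset is looked up by name at runtime; a misspelled name
  // surfaces as java.io.UnsupportedEncodingException.
  val before: Array[Byte] = s.getBytes("utf-8")

  // After: the charset constant is passed directly; no lookup, cannot fail.
  val after: Array[Byte] = s.getBytes(StandardCharsets.UTF_8)
}
```

The `wait()` cleanup mentioned above lives in test code that is not part of this diff; a hedged sketch of the general replacement pattern (hypothetical names, not taken from the patch) could look like:

```scala
import java.util.concurrent.{CountDownLatch, TimeUnit}

object LatchExample {
  def main(args: Array[String]): Unit = {
    val done = new CountDownLatch(1)

    val worker = new Thread(new Runnable {
      override def run(): Unit = {
        // ... do the work under test ...
        done.countDown() // signal completion exactly once
      }
    })
    worker.start()

    // Instead of an unguarded obj.wait(), which can miss a notify or wake
    // spuriously, await the latch with a timeout.
    assert(done.await(10, TimeUnit.SECONDS), "worker did not finish in time")
  }
}
```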
Diffstat (limited to 'sql/core/src/main/scala/org')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/python/EvaluatePython.scala | 9
1 file changed, 6 insertions, 3 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/python/EvaluatePython.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/python/EvaluatePython.scala
index 8c46516594..da28ec4f53 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/python/EvaluatePython.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/python/EvaluatePython.scala
@@ -18,6 +18,7 @@
package org.apache.spark.sql.execution.python
import java.io.OutputStream
+import java.nio.charset.StandardCharsets
import scala.collection.JavaConverters._
@@ -136,7 +137,7 @@ object EvaluatePython {
case (c, StringType) => UTF8String.fromString(c.toString)
- case (c: String, BinaryType) => c.getBytes("utf-8")
+ case (c: String, BinaryType) => c.getBytes(StandardCharsets.UTF_8)
case (c, BinaryType) if c.getClass.isArray && c.getClass.getComponentType.getName == "byte" => c
case (c: java.util.List[_], ArrayType(elementType, _)) =>
@@ -185,7 +186,8 @@ object EvaluatePython {
def pickle(obj: Object, out: OutputStream, pickler: Pickler): Unit = {
out.write(Opcodes.GLOBAL)
- out.write((module + "\n" + "_parse_datatype_json_string" + "\n").getBytes("utf-8"))
+ out.write(
+ (module + "\n" + "_parse_datatype_json_string" + "\n").getBytes(StandardCharsets.UTF_8))
val schema = obj.asInstanceOf[StructType]
pickler.save(schema.json)
out.write(Opcodes.TUPLE1)
@@ -209,7 +211,8 @@ object EvaluatePython {
def pickle(obj: Object, out: OutputStream, pickler: Pickler): Unit = {
if (obj == this) {
out.write(Opcodes.GLOBAL)
- out.write((module + "\n" + "_create_row_inbound_converter" + "\n").getBytes("utf-8"))
+ out.write(
+ (module + "\n" + "_create_row_inbound_converter" + "\n").getBytes(StandardCharsets.UTF_8))
} else {
// it will be memorized by Pickler to save some bytes
pickler.save(this)