author     Davies Liu <davies@databricks.com>   2015-12-16 15:48:11 -0800
committer  Davies Liu <davies.liu@gmail.com>    2015-12-16 15:48:11 -0800
commit     27b98e99d21a0cc34955337f82a71a18f9220ab2 (patch)
tree       4eb3a116264543d9fc0e1121bc93ccaa5b3113f7 /python/pyspark
parent     3a44aebd0c5331f6ff00734fa44ef63f8d18cfbb (diff)
download   spark-27b98e99d21a0cc34955337f82a71a18f9220ab2.tar.gz
           spark-27b98e99d21a0cc34955337f82a71a18f9220ab2.tar.bz2
           spark-27b98e99d21a0cc34955337f82a71a18f9220ab2.zip
[SPARK-12380] [PYSPARK] use SQLContext.getOrCreate in mllib
MLlib should use SQLContext.getOrCreate() instead of creating a new SQLContext.

Author: Davies Liu <davies@databricks.com>

Closes #10338 from davies/create_context.
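In a nutshell, the patch swaps a constructor call for a cached lookup. A minimal sketch of the difference, assuming the Spark 1.6-era API in which SQLContext.getOrCreate(sc) caches one instance per process (the contexts below are illustrative, not taken from the patch):

    from pyspark import SparkContext
    from pyspark.sql import SQLContext

    sc = SparkContext.getOrCreate()

    # New pattern used by this patch: reuse one shared SQLContext.
    ctx_a = SQLContext.getOrCreate(sc)
    ctx_b = SQLContext.getOrCreate(sc)
    assert ctx_a is ctx_b        # same cached instance

    # Old pattern: each call built a fresh context, so state such as
    # registered temp tables and SQL configuration was not shared.
    ctx_c = SQLContext(sc)
    assert ctx_c is not ctx_a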
Diffstat (limited to 'python/pyspark')
-rw-r--r--  python/pyspark/mllib/common.py       6
-rw-r--r--  python/pyspark/mllib/evaluation.py  10
-rw-r--r--  python/pyspark/mllib/feature.py      4
3 files changed, 9 insertions(+), 11 deletions(-)
diff --git a/python/pyspark/mllib/common.py b/python/pyspark/mllib/common.py
index a439a488de..9fda1b1682 100644
--- a/python/pyspark/mllib/common.py
+++ b/python/pyspark/mllib/common.py
@@ -102,7 +102,7 @@ def _java2py(sc, r, encoding="bytes"):
             return RDD(jrdd, sc)
 
         if clsName == 'DataFrame':
-            return DataFrame(r, SQLContext(sc))
+            return DataFrame(r, SQLContext.getOrCreate(sc))
 
         if clsName in _picklable_classes:
             r = sc._jvm.SerDe.dumps(r)
@@ -125,7 +125,7 @@ def callJavaFunc(sc, func, *args):
 
 
 def callMLlibFunc(name, *args):
     """ Call API in PythonMLLibAPI """
-    sc = SparkContext._active_spark_context
+    sc = SparkContext.getOrCreate()
     api = getattr(sc._jvm.PythonMLLibAPI(), name)
     return callJavaFunc(sc, api, *args)
@@ -135,7 +135,7 @@ class JavaModelWrapper(object):
     Wrapper for the model in JVM
     """
     def __init__(self, java_model):
-        self._sc = SparkContext._active_spark_context
+        self._sc = SparkContext.getOrCreate()
         self._java_model = java_model
 
     def __del__(self):
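The common.py changes also harden the context lookup: SparkContext._active_spark_context is a private attribute that stays None until a context is started, while SparkContext.getOrCreate() creates one on demand. A rough before/after sketch (not the patched file itself):

    from pyspark import SparkContext

    # Before: a private attribute; None when no context exists, so a
    # later sc._jvm access would fail with an AttributeError.
    sc = SparkContext._active_spark_context

    # After: returns the active context or creates a default one, so
    # callMLlibFunc() and JavaModelWrapper never see None.
    sc = SparkContext.getOrCreate()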
diff --git a/python/pyspark/mllib/evaluation.py b/python/pyspark/mllib/evaluation.py
index 8c87ee9df2..22e68ea5b4 100644
--- a/python/pyspark/mllib/evaluation.py
+++ b/python/pyspark/mllib/evaluation.py
@@ -44,7 +44,7 @@ class BinaryClassificationMetrics(JavaModelWrapper):
 
     def __init__(self, scoreAndLabels):
         sc = scoreAndLabels.ctx
-        sql_ctx = SQLContext(sc)
+        sql_ctx = SQLContext.getOrCreate(sc)
         df = sql_ctx.createDataFrame(scoreAndLabels, schema=StructType([
             StructField("score", DoubleType(), nullable=False),
             StructField("label", DoubleType(), nullable=False)]))
@@ -103,7 +103,7 @@ class RegressionMetrics(JavaModelWrapper):
 
     def __init__(self, predictionAndObservations):
         sc = predictionAndObservations.ctx
-        sql_ctx = SQLContext(sc)
+        sql_ctx = SQLContext.getOrCreate(sc)
         df = sql_ctx.createDataFrame(predictionAndObservations, schema=StructType([
             StructField("prediction", DoubleType(), nullable=False),
             StructField("observation", DoubleType(), nullable=False)]))
@@ -197,7 +197,7 @@ class MulticlassMetrics(JavaModelWrapper):
 
     def __init__(self, predictionAndLabels):
         sc = predictionAndLabels.ctx
-        sql_ctx = SQLContext(sc)
+        sql_ctx = SQLContext.getOrCreate(sc)
         df = sql_ctx.createDataFrame(predictionAndLabels, schema=StructType([
             StructField("prediction", DoubleType(), nullable=False),
             StructField("label", DoubleType(), nullable=False)]))
@@ -338,7 +338,7 @@ class RankingMetrics(JavaModelWrapper):
 
     def __init__(self, predictionAndLabels):
         sc = predictionAndLabels.ctx
-        sql_ctx = SQLContext(sc)
+        sql_ctx = SQLContext.getOrCreate(sc)
         df = sql_ctx.createDataFrame(predictionAndLabels,
                                      schema=sql_ctx._inferSchema(predictionAndLabels))
         java_model = callMLlibFunc("newRankingMetrics", df._jdf)
@@ -424,7 +424,7 @@ class MultilabelMetrics(JavaModelWrapper):
 
     def __init__(self, predictionAndLabels):
         sc = predictionAndLabels.ctx
-        sql_ctx = SQLContext(sc)
+        sql_ctx = SQLContext.getOrCreate(sc)
         df = sql_ctx.createDataFrame(predictionAndLabels,
                                      schema=sql_ctx._inferSchema(predictionAndLabels))
         java_class = sc._jvm.org.apache.spark.mllib.evaluation.MultilabelMetrics
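The public API of these evaluation classes is unchanged; only the SQLContext used internally to build the DataFrame is now the shared one. A quick usage sketch with made-up toy data:

    from pyspark import SparkContext
    from pyspark.mllib.evaluation import BinaryClassificationMetrics

    sc = SparkContext.getOrCreate()
    # (score, label) pairs; this toy data is perfectly separable.
    score_and_labels = sc.parallelize(
        [(0.1, 0.0), (0.4, 0.0), (0.8, 1.0), (0.9, 1.0)])
    metrics = BinaryClassificationMetrics(score_and_labels)
    print(metrics.areaUnderROC)  # 1.0 for perfectly separated scores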
diff --git a/python/pyspark/mllib/feature.py b/python/pyspark/mllib/feature.py
index 7254679ebb..acd7ec57d6 100644
--- a/python/pyspark/mllib/feature.py
+++ b/python/pyspark/mllib/feature.py
@@ -30,7 +30,7 @@ if sys.version >= '3':
 
 from py4j.protocol import Py4JJavaError
 
-from pyspark import SparkContext, since
+from pyspark import since
 from pyspark.rdd import RDD, ignore_unicode_prefix
 from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
 from pyspark.mllib.linalg import (
@@ -100,8 +100,6 @@ class Normalizer(VectorTransformer):
         :return: normalized vector. If the norm of the input is zero, it
                  will return the input vector.
         """
-        sc = SparkContext._active_spark_context
-        assert sc is not None, "SparkContext should be initialized first"
         if isinstance(vector, RDD):
             vector = vector.map(_convert_to_vector)
         else:
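The two lines dropped from Normalizer.transform were dead code after the common.py change: callMLlibFunc() now obtains the context itself via SparkContext.getOrCreate(), so the local sc, the assertion, and the SparkContext import were all unnecessary. Behavior is unchanged, for example:

    from pyspark.mllib.feature import Normalizer
    from pyspark.mllib.linalg import Vectors

    nor = Normalizer(p=2.0)  # L2 normalization
    # [3, 4] has L2 norm 5, so this prints DenseVector([0.6, 0.8]).
    print(nor.transform(Vectors.dense([3.0, 4.0])))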