author     Ken Takagiwa <ken@Kens-MacBook-Pro.local>   2014-07-15 21:34:05 -0700
committer  Reynold Xin <rxin@apache.org>                2014-07-15 21:34:05 -0700
commit     563acf5edfbfb2fa756a1f0accde0940592663e9 (patch)
tree       e4df0fc75b2c1aa748e80293dd099b5eb6ea375b /python
parent     9c12de5092312319aa22f24df47a6de0e41a0102 (diff)
download   spark-563acf5edfbfb2fa756a1f0accde0940592663e9.tar.gz
           spark-563acf5edfbfb2fa756a1f0accde0940592663e9.tar.bz2
           spark-563acf5edfbfb2fa756a1f0accde0940592663e9.zip
follow pep8 None should be compared using is or is not
http://legacy.python.org/dev/peps/pep-0008/

## Programming Recommendations

- Comparisons to singletons like None should always be done with is or is not, never the equality operators.

Author: Ken Takagiwa <ken@Kens-MacBook-Pro.local>

Closes #1422 from giwa/apache_master and squashes the following commits:

7b361f3 [Ken Takagiwa] follow pep8 None should be checked using is or is not
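As a side note (not part of the original commit message), the reason the PEP 8 rule matters is that `== None` dispatches to the object's `__eq__` method, which a class may override, whereas `is None` always tests object identity. A minimal sketch with a hypothetical `Row` class:

```python
class Row(object):
    # Hypothetical class whose __eq__ treats an empty row as equal to anything empty.
    def __init__(self, values=None):
        self.values = values

    def __eq__(self, other):
        if not self.values and not other:
            return True  # an empty Row compares equal to None, (), [], ...
        return isinstance(other, Row) and self.values == other.values

empty = Row()
print(empty == None)   # True  -- __eq__ is consulted and gives a misleading answer
print(empty is None)   # False -- identity check, which is what the code actually means
```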
Diffstat (limited to 'python')
-rw-r--r--  python/pyspark/cloudpickle.py   4
-rw-r--r--  python/pyspark/conf.py          4
-rw-r--r--  python/pyspark/rddsampler.py    2
-rw-r--r--  python/pyspark/shell.py         4
4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/python/pyspark/cloudpickle.py b/python/pyspark/cloudpickle.py
index eb5dbb8de2..4fda2a9b95 100644
--- a/python/pyspark/cloudpickle.py
+++ b/python/pyspark/cloudpickle.py
@@ -243,10 +243,10 @@ class CloudPickler(pickle.Pickler):
# if func is lambda, def'ed at prompt, is in main, or is nested, then
# we'll pickle the actual function object rather than simply saving a
# reference (as is done in default pickler), via save_function_tuple.
- if islambda(obj) or obj.func_code.co_filename == '<stdin>' or themodule == None:
+ if islambda(obj) or obj.func_code.co_filename == '<stdin>' or themodule is None:
#Force server to import modules that have been imported in main
modList = None
- if themodule == None and not self.savedForceImports:
+ if themodule is None and not self.savedForceImports:
mainmod = sys.modules['__main__']
if useForcedImports and hasattr(mainmod,'___pyc_forcedImports__'):
modList = list(mainmod.___pyc_forcedImports__)
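The comment in this hunk describes when cloudpickle serializes the full function object instead of a by-name reference: lambdas, functions defined at the prompt, functions in __main__, and nested functions cannot be looked up by module attribute, which is exactly where the standard pickler gives up. A small illustration of that failure mode with the standard-library pickler (independent of PySpark, shown only for context):

```python
import pickle

square = lambda x: x * x  # defined in __main__, like code typed at the PySpark shell

try:
    pickle.dumps(square)
except Exception as exc:
    # The default pickler stores functions by reference (module + name), so a
    # lambda in __main__ cannot be pickled; cloudpickle instead captures the
    # code object and closure so the function can be shipped to executors.
    print("standard pickle failed: %s" % exc)
```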
diff --git a/python/pyspark/conf.py b/python/pyspark/conf.py
index 8eff4a242a..60fc6ba7c5 100644
--- a/python/pyspark/conf.py
+++ b/python/pyspark/conf.py
@@ -30,7 +30,7 @@ u'My app'
u'local'
>>> sc.appName
u'My app'
->>> sc.sparkHome == None
+>>> sc.sparkHome is None
True
>>> conf = SparkConf(loadDefaults=False)
@@ -116,7 +116,7 @@ class SparkConf(object):
def setExecutorEnv(self, key=None, value=None, pairs=None):
"""Set an environment variable to be passed to executors."""
- if (key != None and pairs != None) or (key == None and pairs == None):
+ if (key is not None and pairs is not None) or (key is None and pairs is None):
raise Exception("Either pass one key-value pair or a list of pairs")
elif key != None:
self._jconf.setExecutorEnv(key, value)
diff --git a/python/pyspark/rddsampler.py b/python/pyspark/rddsampler.py
index 845a267e31..122bc38b03 100644
--- a/python/pyspark/rddsampler.py
+++ b/python/pyspark/rddsampler.py
@@ -82,7 +82,7 @@ class RDDSampler(object):
return (num_arrivals - 1)
def shuffle(self, vals):
- if self._random == None:
+ if self._random is None:
self.initRandomGenerator(0) # this should only ever called on the master so
# the split does not matter
diff --git a/python/pyspark/shell.py b/python/pyspark/shell.py
index ebd714db7a..2ce5409cd6 100644
--- a/python/pyspark/shell.py
+++ b/python/pyspark/shell.py
@@ -35,7 +35,7 @@ from pyspark.context import SparkContext
from pyspark.storagelevel import StorageLevel
# this is the equivalent of ADD_JARS
-add_files = os.environ.get("ADD_FILES").split(',') if os.environ.get("ADD_FILES") != None else None
+add_files = os.environ.get("ADD_FILES").split(',') if os.environ.get("ADD_FILES") is not None else None
if os.environ.get("SPARK_EXECUTOR_URI"):
SparkContext.setSystemProperty("spark.executor.uri", os.environ["SPARK_EXECUTOR_URI"])
@@ -55,7 +55,7 @@ print("Using Python version %s (%s, %s)" % (
platform.python_build()[1]))
print("SparkContext available as sc.")
-if add_files != None:
+if add_files is not None:
print("Adding files: [%s]" % ", ".join(add_files))
# The ./bin/pyspark script stores the old PYTHONSTARTUP value in OLD_PYTHONSTARTUP,