author     Yanbo Liang <ybliang8@gmail.com>           2016-04-27 14:08:26 -0700
committer  Joseph K. Bradley <joseph@databricks.com>  2016-04-27 14:08:26 -0700
commit     4672e9838b130d006965efeba2665676aa995ebc (patch)
tree       1c9461c5596c76eb10059d90c351b4f2ded1bcb7 /python
parent     24bea000476cdd0b43be5160a76bc5b170ef0b42 (diff)
[SPARK-14899][ML][PYSPARK] Remove spark.ml HashingTF hashingAlg option
## What changes were proposed in this pull request?

Since [SPARK-10574](https://issues.apache.org/jira/browse/SPARK-10574) breaks the behavior of ```HashingTF```, we should enforce good practice by removing the "native" hashAlgorithm option from spark.ml and pyspark.ml. We can leave spark.mllib and pyspark.mllib alone.

## How was this patch tested?

Unit tests.

cc jkbradley

Author: Yanbo Liang <ybliang8@gmail.com>

Closes #12702 from yanboliang/spark-14899.
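For context, here is a minimal sketch (not part of the patch) of what constructing ```HashingTF``` in pyspark.ml looks like after this change, mirroring the doctest in the diff below. It assumes a local Spark install from this era; the ```local[2]``` master and app name are illustrative choices, and only the surviving parameters (```numFeatures```, ```binary```, ```inputCol```, ```outputCol```) are passed.

```python
# Sketch of the post-change API: hashAlgorithm is no longer accepted,
# murmur3 is the only hashing scheme left in spark.ml / pyspark.ml.
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.ml.feature import HashingTF

sc = SparkContext("local[2]", "hashingtf-sketch")
sqlContext = SQLContext(sc)

df = sqlContext.createDataFrame([(["a", "b", "c"],)], ["words"])
# A power-of-two numFeatures, as the new docstring recommends.
hashingTF = HashingTF(numFeatures=1 << 4, inputCol="words", outputCol="features")
hashingTF.transform(df).select("features").show(truncate=False)
sc.stop()
```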
Diffstat (limited to 'python')
-rw-r--r--  python/pyspark/ml/feature.py  41
-rw-r--r--  python/pyspark/ml/tests.py     9
2 files changed, 14 insertions(+), 36 deletions(-)
diff --git a/python/pyspark/ml/feature.py b/python/pyspark/ml/feature.py
index 0e578d48ca..610d167f3a 100644
--- a/python/pyspark/ml/feature.py
+++ b/python/pyspark/ml/feature.py
@@ -517,8 +517,12 @@ class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures, Java
"""
.. note:: Experimental
- Maps a sequence of terms to their term frequencies using the
- hashing trick.
+ Maps a sequence of terms to their term frequencies using the hashing trick.
+ Currently we use Austin Appleby's MurmurHash 3 algorithm (MurmurHash3_x86_32)
+ to calculate the hash code value for the term object.
+ Since a simple modulo is used to transform the hash function to a column index,
+ it is advisable to use a power of two as the numFeatures parameter;
+ otherwise the features will not be mapped evenly to the columns.
>>> df = sqlContext.createDataFrame([(["a", "b", "c"],)], ["words"])
>>> hashingTF = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
@@ -543,30 +547,22 @@ class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures, Java
"rather than integer counts. Default False.",
typeConverter=TypeConverters.toBoolean)
- hashAlgorithm = Param(Params._dummy(), "hashAlgorithm", "The hash algorithm used when " +
- "mapping term to integer. Supported options: murmur3(default) " +
- "and native.", typeConverter=TypeConverters.toString)
-
@keyword_only
- def __init__(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None,
- hashAlgorithm="murmur3"):
+ def __init__(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None):
"""
- __init__(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None, \
- hashAlgorithm="murmur3")
+ __init__(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None)
"""
super(HashingTF, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.HashingTF", self.uid)
- self._setDefault(numFeatures=1 << 18, binary=False, hashAlgorithm="murmur3")
+ self._setDefault(numFeatures=1 << 18, binary=False)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.3.0")
- def setParams(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None,
- hashAlgorithm="murmur3"):
+ def setParams(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None):
"""
- setParams(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None, \
- hashAlgorithm="murmur3")
+ setParams(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None)
Sets params for this HashingTF.
"""
kwargs = self.setParams._input_kwargs
@@ -587,21 +583,6 @@ class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures, Java
"""
return self.getOrDefault(self.binary)
- @since("2.0.0")
- def setHashAlgorithm(self, value):
- """
- Sets the value of :py:attr:`hashAlgorithm`.
- """
- self._set(hashAlgorithm=value)
- return self
-
- @since("2.0.0")
- def getHashAlgorithm(self):
- """
- Gets the value of hashAlgorithm or its default value.
- """
- return self.getOrDefault(self.hashAlgorithm)
-
@inherit_doc
class IDF(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
diff --git a/python/pyspark/ml/tests.py b/python/pyspark/ml/tests.py
index d014da8d0e..ebef656632 100644
--- a/python/pyspark/ml/tests.py
+++ b/python/pyspark/ml/tests.py
@@ -911,15 +911,12 @@ class HashingTFTest(PySparkTestCase):
sqlContext = SQLContext(self.sc)
df = sqlContext.createDataFrame([(0, ["a", "a", "b", "c", "c", "c"])], ["id", "words"])
- n = 100
+ n = 10
hashingTF = HashingTF()
- hashingTF.setInputCol("words").setOutputCol("features").setNumFeatures(n)\
- .setBinary(True).setHashAlgorithm("native")
+ hashingTF.setInputCol("words").setOutputCol("features").setNumFeatures(n).setBinary(True)
output = hashingTF.transform(df)
features = output.select("features").first().features.toArray()
- expected = Vectors.sparse(n, {(ord("a") % n): 1.0,
- (ord("b") % n): 1.0,
- (ord("c") % n): 1.0}).toArray()
+ expected = Vectors.dense([1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).toArray()
for i in range(0, n):
self.assertAlmostEqual(features[i], expected[i], 14, "Error at " + str(i) +
": expected " + str(expected[i]) + ", got " + str(features[i]))