# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import inspect
import sys
import array as pyarray
import unittest

import numpy as np

from pyspark import keyword_only
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import Binarizer, Bucketizer, ElementwiseProduct, IndexToString, \
    MaxAbsScaler, VectorSlicer, Word2Vec
from pyspark.ml.linalg import DenseVector, SparseVector, Vectors
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasInputCol, HasMaxIter, HasSeed
from pyspark.ml.wrapper import JavaParams
from pyspark.testing.mlutils import check_params, PySparkTestCase, SparkSessionTestCase


0038 if sys.version > '3':
0039     xrange = range
0040 
0041 
0042 class ParamTypeConversionTests(PySparkTestCase):
0043     """
0044     Test that param type conversion happens.
0045     """
0046 
0047     def test_int(self):
0048         lr = LogisticRegression(maxIter=5.0)
0049         self.assertEqual(lr.getMaxIter(), 5)
0050         self.assertTrue(type(lr.getMaxIter()) == int)
0051         self.assertRaises(TypeError, lambda: LogisticRegression(maxIter="notAnInt"))
0052         self.assertRaises(TypeError, lambda: LogisticRegression(maxIter=5.1))
0053 
0054     def test_float(self):
0055         lr = LogisticRegression(tol=1)
0056         self.assertEqual(lr.getTol(), 1.0)
0057         self.assertTrue(type(lr.getTol()) == float)
0058         self.assertRaises(TypeError, lambda: LogisticRegression(tol="notAFloat"))
0059 
0060     def test_vector(self):
0061         ewp = ElementwiseProduct(scalingVec=[1, 3])
0062         self.assertEqual(ewp.getScalingVec(), DenseVector([1.0, 3.0]))
0063         ewp = ElementwiseProduct(scalingVec=np.array([1.2, 3.4]))
0064         self.assertEqual(ewp.getScalingVec(), DenseVector([1.2, 3.4]))
0065         self.assertRaises(TypeError, lambda: ElementwiseProduct(scalingVec=["a", "b"]))
0066 
0067     def test_list(self):
0068         l = [0, 1]
0069         for lst_like in [l, np.array(l), DenseVector(l), SparseVector(len(l), range(len(l)), l),
0070                          pyarray.array('l', l), xrange(2), tuple(l)]:
0071             converted = TypeConverters.toList(lst_like)
0072             self.assertEqual(type(converted), list)
0073             self.assertListEqual(converted, l)
0074 
0075     def test_list_int(self):
0076         for indices in [[1.0, 2.0], np.array([1.0, 2.0]), DenseVector([1.0, 2.0]),
0077                         SparseVector(2, {0: 1.0, 1: 2.0}), xrange(1, 3), (1.0, 2.0),
0078                         pyarray.array('d', [1.0, 2.0])]:
0079             vs = VectorSlicer(indices=indices)
0080             self.assertListEqual(vs.getIndices(), [1, 2])
0081             self.assertTrue(all([type(v) == int for v in vs.getIndices()]))
0082         self.assertRaises(TypeError, lambda: VectorSlicer(indices=["a", "b"]))
0083 
0084     def test_list_float(self):
0085         b = Bucketizer(splits=[1, 4])
0086         self.assertEqual(b.getSplits(), [1.0, 4.0])
0087         self.assertTrue(all([type(v) == float for v in b.getSplits()]))
0088         self.assertRaises(TypeError, lambda: Bucketizer(splits=["a", 1.0]))
0089 
0090     def test_list_list_float(self):
0091         b = Bucketizer(splitsArray=[[-0.1, 0.5, 3], [-5, 1.5]])
0092         self.assertEqual(b.getSplitsArray(), [[-0.1, 0.5, 3.0], [-5.0, 1.5]])
0093         self.assertTrue(all([type(v) == list for v in b.getSplitsArray()]))
0094         self.assertTrue(all([type(v) == float for v in b.getSplitsArray()[0]]))
0095         self.assertTrue(all([type(v) == float for v in b.getSplitsArray()[1]]))
0096         self.assertRaises(TypeError, lambda: Bucketizer(splitsArray=["a", 1.0]))
0097         self.assertRaises(TypeError, lambda: Bucketizer(splitsArray=[[-5, 1.5], ["a", 1.0]]))
0098 
0099     def test_list_string(self):
0100         for labels in [np.array(['a', u'b']), ['a', u'b'], np.array(['a', 'b'])]:
0101             idx_to_string = IndexToString(labels=labels)
0102             self.assertListEqual(idx_to_string.getLabels(), ['a', 'b'])
0103         self.assertRaises(TypeError, lambda: IndexToString(labels=['a', 2]))
0104 
0105     def test_string(self):
0106         lr = LogisticRegression()
0107         for col in ['features', u'features', np.str_('features')]:
0108             lr.setFeaturesCol(col)
0109             self.assertEqual(lr.getFeaturesCol(), 'features')
0110         self.assertRaises(TypeError, lambda: LogisticRegression(featuresCol=2.3))
0111 
0112     def test_bool(self):
0113         self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept=1))
0114         self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept="false"))
0115 
0116 
0117 class TestParams(HasMaxIter, HasInputCol, HasSeed):
0118     """
0119     A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
0120     """
0121     @keyword_only
0122     def __init__(self, seed=None):
0123         super(TestParams, self).__init__()
0124         self._setDefault(maxIter=10)
0125         kwargs = self._input_kwargs
0126         self.setParams(**kwargs)
0127 
0128     @keyword_only
0129     def setParams(self, seed=None):
0130         """
0131         setParams(self, seed=None)
0132         Sets params for this test.
0133         """
0134         kwargs = self._input_kwargs
0135         return self._set(**kwargs)
0136 
0137 
0138 class OtherTestParams(HasMaxIter, HasInputCol, HasSeed):
0139     """
0140     A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
0141     """
0142     @keyword_only
0143     def __init__(self, seed=None):
0144         super(OtherTestParams, self).__init__()
0145         self._setDefault(maxIter=10)
0146         kwargs = self._input_kwargs
0147         self.setParams(**kwargs)
0148 
0149     @keyword_only
0150     def setParams(self, seed=None):
0151         """
0152         setParams(self, seed=None)
0153         Sets params for this test.
0154         """
0155         kwargs = self._input_kwargs
0156         return self._set(**kwargs)
0157 
0158 
0159 class HasThrowableProperty(Params):
0160 
0161     def __init__(self):
0162         super(HasThrowableProperty, self).__init__()
0163         self.p = Param(self, "none", "empty param")
0164 
0165     @property
0166     def test_property(self):
0167         raise RuntimeError("Test property to raise error when invoked")
0168 
0169 
0170 class ParamTests(SparkSessionTestCase):
0171 
0172     def test_copy_new_parent(self):
0173         testParams = TestParams()
0174         # Copying an instantiated param should fail
0175         with self.assertRaises(ValueError):
0176             testParams.maxIter._copy_new_parent(testParams)
0177         # Copying a dummy param should succeed
0178         TestParams.maxIter._copy_new_parent(testParams)
0179         maxIter = testParams.maxIter
0180         self.assertEqual(maxIter.name, "maxIter")
0181         self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
0182         self.assertTrue(maxIter.parent == testParams.uid)
0183 
0184     def test_param(self):
0185         testParams = TestParams()
0186         maxIter = testParams.maxIter
0187         self.assertEqual(maxIter.name, "maxIter")
0188         self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
0189         self.assertTrue(maxIter.parent == testParams.uid)
0190 
0191     def test_hasparam(self):
0192         testParams = TestParams()
0193         self.assertTrue(all([testParams.hasParam(p.name) for p in testParams.params]))
0194         self.assertFalse(testParams.hasParam("notAParameter"))
0195         self.assertTrue(testParams.hasParam(u"maxIter"))
0196 
0197     def test_resolveparam(self):
0198         testParams = TestParams()
0199         self.assertEqual(testParams._resolveParam(testParams.maxIter), testParams.maxIter)
0200         self.assertEqual(testParams._resolveParam("maxIter"), testParams.maxIter)
0201 
0202         self.assertEqual(testParams._resolveParam(u"maxIter"), testParams.maxIter)
0203         if sys.version_info[0] >= 3:
0204             # In Python 3, it is allowed to get/set attributes with non-ascii characters.
0205             e_cls = AttributeError
0206         else:
0207             e_cls = UnicodeEncodeError
0208         self.assertRaises(e_cls, lambda: testParams._resolveParam(u"아"))
0209 
0210     def test_params(self):
0211         testParams = TestParams()
0212         maxIter = testParams.maxIter
0213         inputCol = testParams.inputCol
0214         seed = testParams.seed
0215 
0216         params = testParams.params
0217         self.assertEqual(params, [inputCol, maxIter, seed])
0218 
0219         self.assertTrue(testParams.hasParam(maxIter.name))
0220         self.assertTrue(testParams.hasDefault(maxIter))
0221         self.assertFalse(testParams.isSet(maxIter))
0222         self.assertTrue(testParams.isDefined(maxIter))
0223         self.assertEqual(testParams.getMaxIter(), 10)
0224 
0225         self.assertTrue(testParams.hasParam(inputCol.name))
0226         self.assertFalse(testParams.hasDefault(inputCol))
0227         self.assertFalse(testParams.isSet(inputCol))
0228         self.assertFalse(testParams.isDefined(inputCol))
0229         with self.assertRaises(KeyError):
0230             testParams.getInputCol()
0231 
0232         otherParam = Param(Params._dummy(), "otherParam", "Parameter used to test that " +
0233                            "set raises an error for a non-member parameter.",
0234                            typeConverter=TypeConverters.toString)
0235         with self.assertRaises(ValueError):
0236             testParams.set(otherParam, "value")
0237 
0238         # Since the default is normally random, set it to a known number for debug str
0239         testParams._setDefault(seed=41)
0240 
0241         self.assertEqual(
0242             testParams.explainParams(),
0243             "\n".join(["inputCol: input column name. (undefined)",
0244                        "maxIter: max number of iterations (>= 0). (default: 10)",
0245                        "seed: random seed. (default: 41)"]))
0246 
0247     def test_clear_param(self):
0248         df = self.spark.createDataFrame([(Vectors.dense([1.0]),), (Vectors.dense([2.0]),)], ["a"])
0249         maScaler = MaxAbsScaler(inputCol="a", outputCol="scaled")
0250         model = maScaler.fit(df)
0251         self.assertTrue(model.isSet(model.outputCol))
0252         self.assertEqual(model.getOutputCol(), "scaled")
0253         model.clear(model.outputCol)
0254         self.assertFalse(model.isSet(model.outputCol))
0255         self.assertEqual(model.getOutputCol()[:12], 'MaxAbsScaler')
0256         output = model.transform(df)
0257         self.assertEqual(model.getOutputCol(), output.schema.names[1])
0258 
0259     def test_kmeans_param(self):
0260         algo = KMeans()
0261         self.assertEqual(algo.getInitMode(), "k-means||")
0262         algo.setK(10)
0263         self.assertEqual(algo.getK(), 10)
0264         algo.setInitSteps(10)
0265         self.assertEqual(algo.getInitSteps(), 10)
0266         self.assertEqual(algo.getDistanceMeasure(), "euclidean")
0267         algo.setDistanceMeasure("cosine")
0268         self.assertEqual(algo.getDistanceMeasure(), "cosine")
0269 
0270     def test_hasseed(self):
0271         noSeedSpecd = TestParams()
0272         withSeedSpecd = TestParams(seed=42)
0273         other = OtherTestParams()
0274         # Check that we no longer use 42 as the magic number
0275         self.assertNotEqual(noSeedSpecd.getSeed(), 42)
0276         origSeed = noSeedSpecd.getSeed()
0277         # Check that we only compute the seed once
0278         self.assertEqual(noSeedSpecd.getSeed(), origSeed)
0279         # Check that a specified seed is honored
0280         self.assertEqual(withSeedSpecd.getSeed(), 42)
0281         # Check that a different class has a different seed
0282         self.assertNotEqual(other.getSeed(), noSeedSpecd.getSeed())
0283 
0284     def test_param_property_error(self):
0285         param_store = HasThrowableProperty()
0286         self.assertRaises(RuntimeError, lambda: param_store.test_property)
0287         params = param_store.params  # should not invoke the property 'test_property'
0288         self.assertEqual(len(params), 1)
0289 
0290     def test_word2vec_param(self):
0291         model = Word2Vec().setWindowSize(6)
0292         # Check windowSize is set properly
0293         self.assertEqual(model.getWindowSize(), 6)
0294 
0295     def test_copy_param_extras(self):
0296         tp = TestParams(seed=42)
0297         extra = {tp.getParam(TestParams.inputCol.name): "copy_input"}
0298         tp_copy = tp.copy(extra=extra)
0299         self.assertEqual(tp.uid, tp_copy.uid)
0300         self.assertEqual(tp.params, tp_copy.params)
0301         for k, v in extra.items():
0302             self.assertTrue(tp_copy.isDefined(k))
0303             self.assertEqual(tp_copy.getOrDefault(k), v)
0304         copied_no_extra = {}
0305         for k, v in tp_copy._paramMap.items():
0306             if k not in extra:
0307                 copied_no_extra[k] = v
0308         self.assertEqual(tp._paramMap, copied_no_extra)
0309         self.assertEqual(tp._defaultParamMap, tp_copy._defaultParamMap)
0310         with self.assertRaises(TypeError):
0311             tp.copy(extra={"unknown_parameter": None})
0312         with self.assertRaises(TypeError):
0313             tp.copy(extra=["must be a dict"])
0314 
0315     def test_logistic_regression_check_thresholds(self):
0316         self.assertIsInstance(
0317             LogisticRegression(threshold=0.5, thresholds=[0.5, 0.5]),
0318             LogisticRegression
0319         )
0320 
0321         self.assertRaisesRegexp(
0322             ValueError,
0323             "Logistic Regression getThreshold found inconsistent.*$",
0324             LogisticRegression, threshold=0.42, thresholds=[0.5, 0.5]
0325         )
0326 
0327     def test_preserve_set_state(self):
0328         dataset = self.spark.createDataFrame([(0.5,)], ["data"])
0329         binarizer = Binarizer(inputCol="data")
0330         self.assertFalse(binarizer.isSet("threshold"))
0331         binarizer.transform(dataset)
0332         binarizer._transfer_params_from_java()
0333         self.assertFalse(binarizer.isSet("threshold"),
0334                          "Params not explicitly set should remain unset after transform")
0335 
0336     def test_default_params_transferred(self):
0337         dataset = self.spark.createDataFrame([(0.5,)], ["data"])
0338         binarizer = Binarizer(inputCol="data")
0339         # intentionally change the pyspark default, but don't set it
0340         binarizer._defaultParamMap[binarizer.outputCol] = "my_default"
0341         result = binarizer.transform(dataset).select("my_default").collect()
0342         self.assertFalse(binarizer.isSet(binarizer.outputCol))
0343         self.assertEqual(result[0][0], 1.0)
0344 
0345 
0346 class DefaultValuesTests(PySparkTestCase):
0347     """
0348     Test :py:class:`JavaParams` classes to see if their default Param values match
0349     those in their Scala counterparts.
0350     """
0351 
0352     def test_java_params(self):
0353         import pyspark.ml.feature
0354         import pyspark.ml.classification
0355         import pyspark.ml.clustering
0356         import pyspark.ml.evaluation
0357         import pyspark.ml.pipeline
0358         import pyspark.ml.recommendation
0359         import pyspark.ml.regression
0360 
0361         modules = [pyspark.ml.feature, pyspark.ml.classification, pyspark.ml.clustering,
0362                    pyspark.ml.evaluation, pyspark.ml.pipeline, pyspark.ml.recommendation,
0363                    pyspark.ml.regression]
0364         for module in modules:
0365             for name, cls in inspect.getmembers(module, inspect.isclass):
0366                 if not name.endswith('Model') and not name.endswith('Params') \
0367                         and issubclass(cls, JavaParams) and not inspect.isabstract(cls) \
0368                         and not name.startswith('Java') and name != '_LSH':
0369                     # NOTE: disable check_params_exist until there is parity with Scala API
0370                     check_params(self, cls(), check_params_exist=False)
0371 
0372         # Additional classes that need explicit construction
0373         from pyspark.ml.feature import CountVectorizerModel, StringIndexerModel
0374         check_params(self, CountVectorizerModel.from_vocabulary(['a'], 'input'),
0375                      check_params_exist=False)
0376         check_params(self, StringIndexerModel.from_labels(['a', 'b'], 'input'),
0377                      check_params_exist=False)
0378 
0379 
0380 if __name__ == "__main__":
0381     from pyspark.ml.tests.test_param import *
0382 
0383     try:
0384         import xmlrunner
0385         testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
0386     except ImportError:
0387         testRunner = None
0388     unittest.main(testRunner=testRunner, verbosity=2)