#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
Gradient Boosted Tree Classifier Example.
"""
from __future__ import print_function

# $example on$
from pyspark.ml import Pipeline
from pyspark.ml.classification import GBTClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession

if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("GradientBoostedTreeClassifierExample")\
        .getOrCreate()

    # $example on$
    # Load and parse the data file, converting it to a DataFrame.
    data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")

    # Index labels, adding metadata to the label column.
    # Fit on whole dataset to include all labels in index.
    labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
    # Automatically identify categorical features, and index them.
    # Set maxCategories so features with > 4 distinct values are treated as continuous.
    featureIndexer =\
        VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)

    # Split the data into training and test sets (30% held out for testing)
    (trainingData, testData) = data.randomSplit([0.7, 0.3])
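    # Note: randomSplit is non-deterministic across runs unless a seed is supplied.
    # A minimal sketch of a reproducible split (seed value is illustrative only):
    #     (trainingData, testData) = data.randomSplit([0.7, 0.3], seed=42)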

    # Train a GBT model.
    gbt = GBTClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures", maxIter=10)
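    # GBTClassifier also exposes tuning knobs such as maxDepth and stepSize (the
    # learning rate). A sketch of a more explicit configuration, with illustrative
    # values only, might look like:
    #     gbt = GBTClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures",
    #                         maxIter=10, maxDepth=5, stepSize=0.1, seed=42)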

    # Chain indexers and GBT in a Pipeline
    pipeline = Pipeline(stages=[labelIndexer, featureIndexer, gbt])

    # Train model.  This also runs the indexers.
    model = pipeline.fit(trainingData)

    # Make predictions.
    predictions = model.transform(testData)

    # Select example rows to display.
    predictions.select("prediction", "indexedLabel", "features").show(5)
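    # The displayed predictions are index-encoded labels. To show them in terms of
    # the original label values, a minimal sketch (assuming the extra import
    # `from pyspark.ml.feature import IndexToString`) could map them back:
    #     labelConverter = IndexToString(inputCol="prediction", outputCol="predictedLabel",
    #                                    labels=labelIndexer.labels)
    #     labelConverter.transform(predictions).select("predictedLabel", "features").show(5)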

    # Select (prediction, true label) and compute test error
    evaluator = MulticlassClassificationEvaluator(
        labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
    accuracy = evaluator.evaluate(predictions)
    print("Test Error = %g" % (1.0 - accuracy))
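    # The same evaluator can report other multiclass metrics (e.g. "f1",
    # "weightedPrecision", "weightedRecall") by overriding metricName at
    # evaluation time; a small sketch:
    #     f1 = evaluator.evaluate(predictions, {evaluator.metricName: "f1"})
    #     print("Test F1 = %g" % f1)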

    gbtModel = model.stages[2]
    print(gbtModel)  # summary only
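    # The fitted GBTClassificationModel also exposes per-feature importances and
    # the individual boosted trees; printing them here is purely illustrative.
    print(gbtModel.featureImportances)  # sparse vector of feature importances
    print("Number of trees: %d" % len(gbtModel.trees))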
    # $example off$

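    # The fitted pipeline (indexers plus GBT model) can be persisted and reloaded
    # with the standard ML persistence API; a sketch, assuming a writable,
    # hypothetical path "/tmp/gbt_pipeline_model":
    #     model.write().overwrite().save("/tmp/gbt_pipeline_model")
    #     from pyspark.ml import PipelineModel
    #     sameModel = PipelineModel.load("/tmp/gbt_pipeline_model")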
    spark.stop()