/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.examples.ml;

// $example on$
import java.util.Arrays;
import java.util.List;

import org.apache.spark.ml.classification.LogisticRegression;
import org.apache.spark.ml.classification.LogisticRegressionModel;
import org.apache.spark.ml.linalg.VectorUDT;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.ml.param.ParamMap;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
// $example off$
import org.apache.spark.sql.SparkSession;

/**
 * Java example for Estimator, Transformer, and Param.
 */
public class JavaEstimatorTransformerParamExample {
  public static void main(String[] args) {
    SparkSession spark = SparkSession
      .builder()
      .appName("JavaEstimatorTransformerParamExample")
      .getOrCreate();

    // $example on$
    // Prepare training data.
    List<Row> dataTraining = Arrays.asList(
        RowFactory.create(1.0, Vectors.dense(0.0, 1.1, 0.1)),
        RowFactory.create(0.0, Vectors.dense(2.0, 1.0, -1.0)),
        RowFactory.create(0.0, Vectors.dense(2.0, 1.3, 1.0)),
        RowFactory.create(1.0, Vectors.dense(0.0, 1.2, -0.5))
    );
    StructType schema = new StructType(new StructField[]{
        new StructField("label", DataTypes.DoubleType, false, Metadata.empty()),
        new StructField("features", new VectorUDT(), false, Metadata.empty())
    });
    Dataset<Row> training = spark.createDataFrame(dataTraining, schema);

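    // Optional, for illustration only: inspect the schema and rows of the
    // training DataFrame before fitting.
    training.printSchema();
    training.show();
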
0063     // Create a LogisticRegression instance. This instance is an Estimator.
0064     LogisticRegression lr = new LogisticRegression();
0065     // Print out the parameters, documentation, and any default values.
0066     System.out.println("LogisticRegression parameters:\n" + lr.explainParams() + "\n");
0067 
0068     // We may set parameters using setter methods.
0069     lr.setMaxIter(10).setRegParam(0.01);
0070 
0071     // Learn a LogisticRegression model. This uses the parameters stored in lr.
0072     LogisticRegressionModel model1 = lr.fit(training);
0073     // Since model1 is a Model (i.e., a Transformer produced by an Estimator),
0074     // we can view the parameters it used during fit().
0075     // This prints the parameter (name: value) pairs, where names are unique IDs for this
0076     // LogisticRegression instance.
0077     System.out.println("Model 1 was fit using parameters: " + model1.parent().extractParamMap());
0078 
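    // Illustrative only: individual Param values can also be read back through
    // the generated getters, which the fitted model inherits from its Estimator.
    System.out.println("maxIter used by model 1: " + model1.getMaxIter());
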
    // We may alternatively specify parameters using a ParamMap.
    // Note: param.w(value) is shorthand for building a ParamPair (param, value).
    ParamMap paramMap = new ParamMap()
      .put(lr.maxIter().w(20))  // Specify 1 Param.
      .put(lr.maxIter(), 30)  // This overwrites the original maxIter.
      .put(lr.regParam().w(0.1), lr.threshold().w(0.55));  // Specify multiple Params.

    // One can also combine ParamMaps.
    ParamMap paramMap2 = new ParamMap()
      .put(lr.probabilityCol().w("myProbability"));  // Change the output column name.
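    // $plus$plus is how Scala's ParamMap.++ method appears from Java; it
    // returns a new ParamMap containing the entries of both maps.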
    ParamMap paramMapCombined = paramMap.$plus$plus(paramMap2);

    // Now learn a new model using the paramMapCombined parameters.
    // paramMapCombined overrides all parameters set earlier via lr.set* methods.
    LogisticRegressionModel model2 = lr.fit(training, paramMapCombined);
    System.out.println("Model 2 was fit using parameters: " + model2.parent().extractParamMap());

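    // Illustrative only: fitted models can be persisted and reloaded via the
    // MLWritable/MLReadable interfaces. The path below is a placeholder, so the
    // calls are left commented out.
    // model2.write().overwrite().save("/tmp/logistic-regression-model");
    // LogisticRegressionModel sameModel =
    //   LogisticRegressionModel.load("/tmp/logistic-regression-model");
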
    // Prepare test data.
    List<Row> dataTest = Arrays.asList(
        RowFactory.create(1.0, Vectors.dense(-1.0, 1.5, 1.3)),
        RowFactory.create(0.0, Vectors.dense(3.0, 2.0, -0.1)),
        RowFactory.create(1.0, Vectors.dense(0.0, 2.2, -1.5))
    );
    Dataset<Row> test = spark.createDataFrame(dataTest, schema);

    // Make predictions on the test data using the Transformer.transform() method.
    // LogisticRegressionModel.transform will only use the 'features' column.
    // Note that model2.transform() outputs a 'myProbability' column instead of the usual
    // 'probability' column since we renamed the lr.probabilityCol parameter previously.
    Dataset<Row> results = model2.transform(test);
    Dataset<Row> rows = results.select("features", "label", "myProbability", "prediction");
    for (Row r : rows.collectAsList()) {
      System.out.println("(" + r.get(0) + ", " + r.get(1) + ") -> prob=" + r.get(2)
        + ", prediction=" + r.get(3));
    }
    // $example off$

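    // Illustrative only: since the test rows are labeled, the predictions can be
    // scored with a binary-classification evaluator (area under ROC by default).
    // The 'rawPrediction' column is still produced under its default name, as only
    // probabilityCol was renamed above.
    org.apache.spark.ml.evaluation.BinaryClassificationEvaluator evaluator =
      new org.apache.spark.ml.evaluation.BinaryClassificationEvaluator()
        .setLabelCol("label")
        .setRawPredictionCol("rawPrediction");
    System.out.println("Area under ROC: " + evaluator.evaluate(results));
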
    spark.stop();
  }
}