/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
0018 package org.apache.spark.examples.ml;
0019
0020 import org.apache.spark.sql.Dataset;
0021 import org.apache.spark.sql.SparkSession;
0022
0023
0024 import java.util.Arrays;
0025 import java.util.List;
0026
0027 import org.apache.spark.ml.feature.Binarizer;
0028 import org.apache.spark.sql.Row;
0029 import org.apache.spark.sql.RowFactory;
0030 import org.apache.spark.sql.types.DataTypes;
0031 import org.apache.spark.sql.types.Metadata;
0032 import org.apache.spark.sql.types.StructField;
0033 import org.apache.spark.sql.types.StructType;
0034
0035
0036 public class JavaBinarizerExample {
0037 public static void main(String[] args) {
0038 SparkSession spark = SparkSession
0039 .builder()
0040 .appName("JavaBinarizerExample")
0041 .getOrCreate();
0042
0043
0044 List<Row> data = Arrays.asList(
0045 RowFactory.create(0, 0.1),
0046 RowFactory.create(1, 0.8),
0047 RowFactory.create(2, 0.2)
0048 );
0049 StructType schema = new StructType(new StructField[]{
0050 new StructField("id", DataTypes.IntegerType, false, Metadata.empty()),
0051 new StructField("feature", DataTypes.DoubleType, false, Metadata.empty())
0052 });
0053 Dataset<Row> continuousDataFrame = spark.createDataFrame(data, schema);
0054
0055 Binarizer binarizer = new Binarizer()
0056 .setInputCol("feature")
0057 .setOutputCol("binarized_feature")
0058 .setThreshold(0.5);
0059
0060 Dataset<Row> binarizedDataFrame = binarizer.transform(continuousDataFrame);
0061
0062 System.out.println("Binarizer output with Threshold = " + binarizer.getThreshold());
0063 binarizedDataFrame.show();
0064
0065
0066 spark.stop();
0067 }
0068 }