package org.apache.spark.ml.feature;

import java.util.Arrays;

import static org.apache.spark.sql.types.DataTypes.*;

import org.junit.Assert;
import org.junit.Test;

import org.apache.spark.SharedSparkSession;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.ml.linalg.VectorUDT;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

public class JavaVectorAssemblerSuite extends SharedSparkSession {

  @Test
  public void testVectorAssembler() {
    // The schema mixes feature columns (x, y, z, n) with non-feature columns
    // (id, name) to verify that only the configured inputs are assembled.
    StructType schema = createStructType(new StructField[]{
      createStructField("id", IntegerType, false),
      createStructField("x", DoubleType, false),
      createStructField("y", new VectorUDT(), false),
      createStructField("name", StringType, false),
      createStructField("z", new VectorUDT(), false),
      createStructField("n", LongType, false)
    });
    Row row = RowFactory.create(
      0, 0.0, Vectors.dense(1.0, 2.0), "a",
      Vectors.sparse(2, new int[]{1}, new double[]{3.0}), 10L);
    Dataset<Row> dataset = spark.createDataFrame(Arrays.asList(row), schema);
    VectorAssembler assembler = new VectorAssembler()
      .setInputCols(new String[]{"x", "y", "z", "n"})
      .setOutputCol("features");
    Dataset<Row> output = assembler.transform(dataset);
    // x fills index 0 (0.0, dropped from the sparse result), y fills indices
    // 1-2, z fills indices 3-4 (only 4 is non-zero), and n fills index 5.
    Assert.assertEquals(
      Vectors.sparse(6, new int[]{1, 2, 4, 5}, new double[]{1.0, 2.0, 3.0, 10.0}),
      output.select("features").first().<Vector>getAs(0));
  }
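
  // A further sketch, not part of the original suite: exercises the
  // handleInvalid = "keep" option (available since Spark 2.4), which passes
  // NaN inputs through into the assembled vector instead of raising an
  // error. The test name and data below are illustrative assumptions.
  @Test
  public void testVectorAssemblerKeepInvalid() {
    StructType schema = createStructType(new StructField[]{
      createStructField("a", DoubleType, false),
      createStructField("b", DoubleType, false)
    });
    Row row = RowFactory.create(1.0, Double.NaN);
    Dataset<Row> dataset = spark.createDataFrame(Arrays.asList(row), schema);
    VectorAssembler assembler = new VectorAssembler()
      .setInputCols(new String[]{"a", "b"})
      .setOutputCol("features")
      .setHandleInvalid("keep");
    Vector result = assembler.transform(dataset)
      .select("features").first().<Vector>getAs(0);
    // With "keep", the valid value is assembled as usual and the NaN is
    // preserved at its position rather than triggering an exception.
    Assert.assertEquals(1.0, result.apply(0), 0.0);
    Assert.assertTrue(Double.isNaN(result.apply(1)));
  }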
}