import org.apache.spark.ml.feature.Bucketizer

// Seven split points define six buckets, indexed 0 to 5; bucket i covers
// [splits(i), splits(i+1)), so e.g. -100 falls into bucket 1, not bucket 0.
val splits = Array(Double.NegativeInfinity, -100, -10, 0.0, 10, 90, Double.PositiveInfinity)
val data: Array[Double] = Array(-180, -160, -100, -50, -70, -20, -8, -5, -3, 0.0, 1, 3, 7, 10, 30, 60, 90, 100, 120, 150)
val dataFrame = spark.createDataFrame(data.map(Tuple1.apply)).toDF("features")
dataFrame: org.apache.spark.sql.DataFrame = [features: double]
// Map each value in "features" to the index of the bucket it falls into
val bucketizer = new Bucketizer()
  .setInputCol("features")
  .setOutputCol("bucketedFeatures")
  .setSplits(splits)
val bucketedData = bucketizer.transform(dataFrame)
bucketedData: org.apache.spark.sql.DataFrame = [features: double, bucketedFeatures: double]
bucketedData.show(truncate = false)
+--------+----------------+
|features|bucketedFeatures|
+--------+----------------+
|-180.0  |0.0             |
|-160.0  |0.0             |
|-100.0  |1.0             |
|-50.0   |1.0             |
|-70.0   |1.0             |
|-20.0   |1.0             |
|-8.0    |2.0             |
|-5.0    |2.0             |
|-3.0    |2.0             |
|0.0     |3.0             |
|1.0     |3.0             |
|3.0     |3.0             |
|7.0     |3.0             |
|10.0    |4.0             |
|30.0    |4.0             |
|60.0    |4.0             |
|90.0    |5.0             |
|100.0   |5.0             |
|120.0   |5.0             |
|150.0   |5.0             |
+--------+----------------+
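
One point the transcript above does not show: by default a Bucketizer throws on NaN input, because its handleInvalid parameter defaults to "error". The sketch below is a minimal illustration of setHandleInvalid("keep"); the names bucketizerKeep and dataWithNaN are illustrative additions, reusing splits and data from above.

import org.apache.spark.sql.functions.{col, isnan}

val bucketizerKeep = new Bucketizer()
  .setInputCol("features")
  .setOutputCol("bucketedFeatures")
  .setSplits(splits)
  .setHandleInvalid("keep") // "skip" drops NaN rows; "error" (the default) throws

// Append one NaN to the sample data and bucketize again
val dataWithNaN = spark.createDataFrame((data :+ Double.NaN).map(Tuple1.apply)).toDF("features")
bucketizerKeep.transform(dataWithNaN).filter(isnan(col("features"))).show()
// With "keep", the NaN row lands in an extra bucket indexed splits.length - 1, i.e. 6.0 here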