As many of you know, Apache Spark has announced that the RDD-based MLlib package is in maintenance mode and will receive no new features, so if you are writing algorithms I recommend the DataFrame-based ML package instead.
I won't go over the theory here, since plenty of material already explains it clearly; this post is code only.
Below are a few introductory example programs for KMeans with both Spark ML and MLlib.
Spark ML implementation of KMeans
import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DoubleType, StructField, StructType}
import org.apache.spark.sql.{Row, SparkSession}

/**
 * Created by LiuWenSheng on 2017/10/11.
 */
object MyKMeans {
  def main(args: Array[String]) {
    val sparkSession = SparkSession.builder().appName("mykmeans").master("local[2]").getOrCreate()
    // The dataset used in this example is the iris dataset; you can download it online.
    val rawData: RDD[String] = sparkSession.sparkContext.textFile("D:/testData/fun.txt")
    // Parse each line into a Row of four Double features
    val data = rawData.map(_.split(",")).map(x =>
      Row(x(0).toDouble, x(1).toDouble, x(2).toDouble, x(3).toDouble)
    )
    // Define the schema (StructType)
    val struct: StructType = StructType(
      StructField("f1", DoubleType, false) ::
      StructField("f2", DoubleType, false) ::
      StructField("f3", DoubleType, false) ::
      StructField("f4", DoubleType, false) :: Nil)
    // Create the DataFrame
    val df = sparkSession.createDataFrame(data, struct)
    // Assemble columns f1~f4 into a single feature vector
    val vectorAssembler = new VectorAssembler()
      .setInputCols(Array("f1", "f2", "f3", "f4"))
      .setOutputCol("features")
    val kms = new KMeans()
      .setPredictionCol("predictionCol")
      .setFeaturesCol("features")
      .setMaxIter(66)
      .setK(4)
    val a = vectorAssembler.transform(df)
    val model: KMeansModel = kms.fit(a)
    model.transform(a).show(100)
    // WSSSE: within-cluster sum of squared errors
    val wssse = model.computeCost(a)
    println("wssse is :" + wssse)
    // Print the cluster centers
    model.clusterCenters.foreach(println)
    /** To use a Pipeline instead, uncomment the lines below; a fuller sketch follows the output. **/
    // val pipeLine: Pipeline = new Pipeline().setStages(Array(vectorAssembler, kms))
    // val res: PipelineModel = pipeLine.fit(df)
    // val b: DataFrame = res.transform(df)
    // b.show(130)
  }
}
The output is shown below:
+---+---+---+---+-----------------+-------------+
| f1| f2| f3| f4| features|predictionCol|
+---+---+---+---+-----------------+-------------+
|5.1|3.5|1.4|0.2|[5.1,3.5,1.4,0.2]| 3|
|4.9|3.0|1.4|0.2|[4.9,3.0,1.4,0.2]| 0|
|4.7|3.2|1.3|0.2|[4.7,3.2,1.3,0.2]| 0|
|4.6|3.1|1.5|0.2|[4.6,3.1,1.5,0.2]| 0|
|5.0|3.6|1.4|0.2|[5.0,3.6,1.4,0.2]| 3|
|5.4|3.9|1.7|0.4|[5.4,3.9,1.7,0.4]| 3|
|4.6|3.4|1.4|0.3|[4.6,3.4,1.4,0.3]| 0|
|5.0|3.4|1.5|0.2|[5.0,3.4,1.5,0.2]| 0|
|4.4|2.9|1.4|0.2|[4.4,2.9,1.4,0.2]| 0|
|4.9|3.1|1.5|0.1|[4.9,3.1,1.5,0.1]| 0|
+---+---+---+---+-----------------+-------------+
only showing top 10 rows
wssse is :71.34822351828443
[4.725,3.1333333333333333,1.4208333333333334,0.19166666666666676]
[5.883606557377049,2.740983606557377,4.388524590163936,1.4344262295081964]
[6.8538461538461535,3.076923076923076,5.715384615384614,2.053846153846153]
[5.265384615384616,3.6807692307692306,1.503846153846154,0.2923076923076923]
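As hinted at in the commented-out lines above, the assembler and the KMeans estimator can also be chained into a Pipeline. The following is a minimal sketch of that variant, reusing the `df`, `vectorAssembler`, and `kms` values defined in the program above, so only the raw DataFrame needs to be passed in:

import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.sql.DataFrame

// Chain the VectorAssembler and the (unfitted) KMeans estimator, then fit
// the whole pipeline on the raw DataFrame in a single call.
val pipeline = new Pipeline().setStages(Array(vectorAssembler, kms))
val pipelineModel: PipelineModel = pipeline.fit(df)
// transform() runs both stages: assemble "features", then attach "predictionCol".
val clustered: DataFrame = pipelineModel.transform(df)
clustered.show(10)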
Spark MLlib implementation of KMeans
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Created by LiuWenSheng on 2017/10/16.
 */
object MLLib_KMeans {
  def main(args: Array[String]) {
    val conf = new SparkConf().setMaster("local[2]").setAppName("mllibKMeans")
    val sc = new SparkContext(conf)
    val data = sc.textFile("D:/testData/fun.txt")
    // Build an RDD[Vector] for training
    val parseTrainData: RDD[Vector] = data.map { line =>
      Vectors.dense(line.split(",").take(4).map(_.toDouble))
    }
    val numIterators = 66 // maximum number of iterations
    val numClusters = 3   // number of clusters (k)
    val runs = 3          // number of runs to pick the best solution (ignored since Spark 2.0.0)
    // Train the model
    val model = KMeans.train(parseTrainData, numClusters, numIterators, runs)
    parseTrainData.map(x => x.toString + " belongs to cluster " + model.predict(x)).collect().foreach(println(_))
    // Compute the cost (sum of squared distances from each point to its nearest center)
    val wssse = model.computeCost(parseTrainData)
    println("wssse is:" + wssse)
    // Print the cluster centers
    model.clusterCenters.foreach(println(_))
    // Predict the cluster for a new point
    println(model.predict(Vectors.dense(6.6, 2.9, 4.5, 1.2)))
  }
}
The output is shown below:
[5.1,3.5,1.4,0.2] belongs to cluster 0
[4.9,3.0,1.4,0.2] belongs to cluster 0
[4.7,3.2,1.3,0.2] belongs to cluster 0
[4.6,3.1,1.5,0.2] belongs to cluster 0
[5.0,3.6,1.4,0.2] belongs to cluster 0
[5.4,3.9,1.7,0.4] belongs to cluster 0
[4.6,3.4,1.4,0.3] belongs to cluster 0
[5.0,3.4,1.5,0.2] belongs to cluster 0
[4.4,2.9,1.4,0.2] belongs to cluster 0
[4.9,3.1,1.5,0.1] belongs to cluster 0
[5.4,3.7,1.5,0.2] belongs to cluster 0
[4.8,3.4,1.6,0.2] belongs to cluster 0
[4.8,3.0,1.4,0.1] belongs to cluster 0
[4.3,3.0,1.1,0.1] belongs to cluster 0
[5.8,4.0,1.2,0.2] belongs to cluster 0
[5.7,4.4,1.5,0.4] belongs to cluster 0
[5.4,3.9,1.3,0.4] belongs to cluster 0
(only the first few rows shown)
wssse is:78.94506582597703
[5.005999999999999,3.4180000000000006,1.4640000000000002,0.2439999999999999]
[6.8538461538461535,3.076923076923076,5.715384615384614,2.053846153846153]
[5.883606557377049,2.740983606557377,4.388524590163936,1.4344262295081964]
2
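The WSSSE reported above can also be used to choose k. Here is a minimal sketch, assuming the same `parseTrainData` and `numIterators` values as in the program above: train a model for several candidate values of k and compare their costs; the "elbow" of the resulting curve is a common heuristic for picking k.

// Hypothetical k-selection loop; not part of the original program.
(2 to 8).foreach { k =>
  val m = KMeans.train(parseTrainData, k, numIterators)
  println(s"k = $k, WSSSE = ${m.computeCost(parseTrainData)}")
}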