Spark - K-Means clustering example

来源:互联网 发布:北京网络医生招聘 编辑:程序博客网 时间:2024/06/08 15:26
/**
 * Sample dataset (one point per line, space-separated doubles):
 *   1.0 1.0
 *   1.0 2.0
 *   2.0 1.0
 *   2.0 2.0
 *   3.0 3.0
 *   3.0 4.0
 *   4.0 3.0
 *   4.0 4.0
 */
package com.spark.test

import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.Vectors

/**
 * Minimal Spark MLlib K-Means demo: load points from a text file,
 * train a model, report WSSSE, and save each point's cluster id.
 */
object ObKMeans {

  def main(args: Array[String]): Unit = {
    run()
  }

  /**
   * Runs the full K-Means pipeline.
   *
   * @param inputPath     path of the whitespace-separated point file
   * @param outputPath    directory where per-point cluster ids are saved
   * @param numClusters   number of clusters (k)
   * @param numIterations maximum K-Means iterations
   */
  def run(inputPath: String = "/ruson/kmean.txt",
          outputPath: String = "/ruson/KMeansResult",
          numClusters: Int = 4,
          numIterations: Int = 20): Unit = {
    val conf = new SparkConf().setAppName("ObKMeansTest")
    val sc = new SparkContext(conf)
    try {
      // Parse each non-blank line "x y ..." into a dense MLlib vector.
      // Blank lines would otherwise crash `_.toDouble`.
      val parsedData = sc.textFile(inputPath)
        .filter(_.trim.nonEmpty)
        .map(line => Vectors.dense(line.split(' ').map(_.toDouble)))
        .cache() // reused by train, computeCost and predict — avoid re-reading the file

      // Cluster the data into `numClusters` classes.
      val clusters = KMeans.train(parsedData, numClusters, numIterations)

      // Within Set Sum of Squared Errors: lower means tighter clusters.
      val WSSSE = clusters.computeCost(parsedData)
      println("Within Set Sum of Squared Errors = " + WSSSE)

      // Assign every input point to its nearest learned centroid.
      val result = parsedData.map(point => clusters.predict(point))
      result.saveAsTextFile(outputPath)
      println("Result file : " + outputPath)
    } finally {
      sc.stop() // release cluster resources even if the job fails
    }
  }
}

上面代码头部注释中给出的是数据集;运行之后,结果被分为四类,如下所示




0 0
原创粉丝点击