Spark Streaming Integration with Kafka

package day05.d

import org.apache.spark.{HashPartitioner, SparkConf}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

object KafkaWordCount {

  // Merge the counts of the current batch (y) with the accumulated state (z) for each key x.
  val updateFunc = (iter: Iterator[(String, Seq[Int], Option[Int])]) => {
    //iter.flatMap(it => Some(it._2.sum + it._3.getOrElse(0)).map(x => (it._1, x)))
    iter.flatMap { case (x, y, z) => Some(y.sum + z.getOrElse(0)).map(i => (x, i)) }
  }

  def main(args: Array[String]): Unit = {
    // Project-specific helper that lowers the streaming log level (not shown here).
    LoggerLevels.setStreamingLogLevels()

    // Expected arguments: <zkQuorum> <consumer group> <comma-separated topics> <threads per topic>
    val Array(zkQuorum, group, topics, numThreads) = args

    val sparkConf = new SparkConf().setAppName("KafkaWordCount").setMaster("local[2]")
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    // updateStateByKey needs a checkpoint directory to persist the running state.
    ssc.checkpoint("c://ck2")

    // e.g. "alog-2016-04-16,alog-2016-04-17,alog-2016-04-18"
    // becomes Map(alog-2016-04-16 -> 2, alog-2016-04-17 -> 2, alog-2016-04-18 -> 2)
    val topicMap = topics.split(",").map((_, numThreads.toInt)).toMap

    // Receiver-based stream: each record is a (key, message) pair; only the message is needed here.
    val data = KafkaUtils.createStream(ssc, zkQuorum, group, topicMap, StorageLevel.MEMORY_AND_DISK_SER)
    val words = data.map(_._2).flatMap(_.split(" "))

    // Stateful word count: counts keep accumulating across batches.
    val wordCounts = words.map((_, 1))
      .updateStateByKey(updateFunc, new HashPartitioner(ssc.sparkContext.defaultParallelism), true)

    // A streaming job needs at least one output operation, otherwise start() fails.
    wordCounts.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
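This example uses the receiver-based Kafka API (KafkaUtils.createStream), which connects through ZooKeeper and ships in the separate spark-streaming-kafka module, so that artifact has to be on the classpath. A minimal sbt sketch, assuming Spark 1.6.x with Scala 2.10/2.11 (the versions are illustrative; adjust them to your cluster):

libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-core"            % "1.6.3",
  "org.apache.spark" %% "spark-streaming"       % "1.6.3",
  // Kafka integration module providing the receiver-based createStream API
  "org.apache.spark" %% "spark-streaming-kafka" % "1.6.3"
)

To try it out, pass the four positional arguments when launching the program, for example (hypothetical values): zkQuorum = localhost:2181, group = g1, topics = test, numThreads = 2. Then publish some text to the topic with the Kafka console producer and the accumulated word counts are printed every 5 seconds.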