Spark Streaming --- HDFS WordCount

The following Java program uses Spark Streaming's textFileStream to monitor an HDFS directory and, every 5 seconds, prints the word counts of whatever files arrived during that batch interval.

package com.spark.streaming;

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

import scala.Tuple2;

public class HDFSWordcount {

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("HDFSWordcount");
        // Batch interval of 5 seconds: each micro-batch covers 5s of new input.
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

        // Monitor an HDFS directory; each batch contains the lines of files
        // newly placed in the directory during that interval.
        JavaDStream<String> lines = jssc.textFileStream("hdfs://node12:8020/Spark/Streaming/WordCount");

        // Split each line into words on single spaces.
        JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Iterable<String> call(String line) throws Exception {
                return Arrays.asList(line.split(" "));
            }
        });

        // Map each word to a (word, 1) pair.
        JavaPairDStream<String, Integer> pairs = words.mapToPair(new PairFunction<String, String, Integer>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Tuple2<String, Integer> call(String word) throws Exception {
                return new Tuple2<String, Integer>(word, 1);
            }
        });

        // Sum the counts for each word within the batch.
        JavaPairDStream<String, Integer> wordcounts = pairs.reduceByKey(new Function2<Integer, Integer, Integer>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });

        // Print a sample of each batch's result to the driver's stdout.
        wordcounts.print();

        jssc.start();
        jssc.awaitTermination();
        jssc.close();
    }
}
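The anonymous-class style above targets the Spark 1.x Java API, where FlatMapFunction.call returns an Iterable. On Spark 2.x and later, call returns an Iterator instead, and Java 8 lambdas make the pipeline much shorter. Below is a minimal sketch of the same job for the 2.x API; the HDFS URL and batch interval are taken from the code above, and the class name HDFSWordcount2x is just an illustrative choice.

package com.spark.streaming;

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

import scala.Tuple2;

public class HDFSWordcount2x {

    public static void main(String[] args) throws InterruptedException {
        SparkConf conf = new SparkConf().setAppName("HDFSWordcount");
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

        // Same monitored directory as above; textFileStream semantics are unchanged.
        JavaPairDStream<String, Integer> wordcounts = jssc
                .textFileStream("hdfs://node12:8020/Spark/Streaming/WordCount")
                // Spark 2.x flatMap expects an Iterator, not an Iterable.
                .flatMap(line -> Arrays.asList(line.split(" ")).iterator())
                .mapToPair(word -> new Tuple2<>(word, 1))
                .reduceByKey((v1, v2) -> v1 + v2);

        wordcounts.print();

        jssc.start();
        jssc.awaitTermination();
    }
}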
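To try either version, package the class into a jar and launch it with spark-submit against your cluster (no master is set in the code, so it must come from the submit command). One behavior worth knowing: textFileStream only picks up files that are created in, or atomically moved into, the monitored directory after the streaming context starts. Appending to an existing file is not detected, so the usual pattern is to write new data to a staging path on HDFS and then move it into hdfs://node12:8020/Spark/Streaming/WordCount with hdfs dfs -mv, at which point the next 5-second batch will count its words.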