Java version of Kafka createStream (Spark Streaming receiver-based example)
Source: Internet. Editor: programming blog network. Date: 2024/06/07 03:17
package com.ys.streaming;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;
/**
 * Receiver-based Spark Streaming word count over a Kafka topic (Spark 1.x
 * {@code KafkaUtils.createStream} API). Consumes messages via ZooKeeper,
 * splits each message value on spaces, and prints per-batch word counts.
 */
public class KafkaReceiver {
public static void main(String[] args) {
    // "local[2]" gives one thread to the receiver and one to processing.
    SparkConf conf = new SparkConf().setAppName("KafkaReceiver").setMaster("local[2]");
    // Micro-batch interval: 5 seconds.
    JavaStreamingContext jsc = new JavaStreamingContext(conf, Durations.seconds(5));

    // Topic name -> number of consumer threads reading its partitions.
    Map<String, Integer> topics = new HashMap<String, Integer>();
    topics.put("20170420", 2);

    /*
     * createStream arguments:
     *   1) the streaming context
     *   2) the ZooKeeper quorum
     *   3) the consumer group id
     *   4) the topics to consume and the per-topic thread count
     */
    JavaPairReceiverInputDStream<String, String> lines = KafkaUtils.createStream(
            jsc,
            "master:2181,slave1:2181,slave2:2181",
            "myGroup",
            topics);

    // Kafka records arrive as (key, value); split each value into words.
    JavaDStream<String> words =
            lines.flatMap((Tuple2<String, String> record) -> Arrays.asList(record._2.split(" ")));

    // Pair each word with an initial count of one.
    JavaPairDStream<String, Integer> pairs =
            words.mapToPair((String word) -> new Tuple2<String, Integer>(word, 1));

    // Sum counts per word within each batch.
    JavaPairDStream<String, Integer> result =
            pairs.reduceByKey((Integer a, Integer b) -> a + b);

    result.print();
    jsc.start();
    jsc.awaitTermination();
    jsc.close();
}
}
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;
/**
 * Receiver-based Spark Streaming word count against a Kafka topic, using the
 * legacy (Spark 1.x) {@code KafkaUtils.createStream} ZooKeeper-backed API.
 * Every 5-second batch is tokenized on spaces and printed as (word, count).
 */
public class KafkaReceiver {
public static void main(String[] args) {
    // Local mode needs at least two threads: receiver plus computation.
    SparkConf sparkConf = new SparkConf()
            .setAppName("KafkaReceiver")
            .setMaster("local[2]");
    JavaStreamingContext streamingContext =
            new JavaStreamingContext(sparkConf, Durations.seconds(5));

    // Map each topic to the number of threads consuming its partitions.
    Map<String, Integer> topicThreads = new HashMap<String, Integer>();
    topicThreads.put("20170420", 2);

    /*
     * createStream takes: the streaming context, the ZooKeeper quorum,
     * the consumer group id, and the topic -> thread-count map.
     */
    KafkaUtils
            .createStream(
                    streamingContext,
                    "master:2181,slave1:2181,slave2:2181",
                    "myGroup",
                    topicThreads)
            // value (_2) of each Kafka (key, value) record, split into words
            .flatMap((Tuple2<String, String> kv) -> Arrays.asList(kv._2.split(" ")))
            // seed every word with a count of 1
            .mapToPair((String w) -> new Tuple2<String, Integer>(w, 1))
            // aggregate counts per word within the batch
            .reduceByKey((Integer left, Integer right) -> left + right)
            .print();

    streamingContext.start();
    streamingContext.awaitTermination();
    streamingContext.close();
}
}
0 0
- java版本kafka createStream
- scala版本kafka createStream
- Kafka + spark stream +redis (createStream + createDirectStream)
- java版本kafka createDirectStream
- spark读取kafka数据 createStream和createDirectStream的区别
- spark读取kafka数据 createStream和createDirectStream的区别
- spark读取kafka数据 createStream和createDirectStream的区别
- kafka文档(2)----kafka API(java版本)
- Kafka:Kafka的生产和消费(Java版本)
- kafka初探 版本0.10 java编程
- Method createStream([class org.apache.spark.streaming.api.java.JavaStreamingContext, class java.uti
- kafka的ZkUtils类的java版本部分代码
- kafka学习(三)--java开发(基于kafka0.8版本)
- kafka文档(3)----0.8.2-kafka API(java版本)
- 查看kafka的版本
- kafka 0.10.0.0 版本
- scala版本kafka createDirectStream
- kafka各版本差异
- To Fill or Not to Fill
- java基本数据类型传递与引用传递区别详解
- oracle数据库中的多表查询
- 九宫重排问题
- 关于配置码云的时候遇到一个小问题
- java版本kafka createStream
- 配置Docker过程记录
- c++实现贪吃蛇
- Redis在线用户设计
- 微信小程序开发常见问题FAQ之八
- 截屏方法
- 自定义倒计时跳过按钮
- Json对象与Json字符串互转(4种转换方式)
- CTPN: Detecting Text in Natural Image with Connectionist Text Proposal Network