Getting started with Kafka: a minimal producer/consumer example


1. Dependency JARs (Maven pom.xml)

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.zrj.points</groupId>
  <artifactId>kafka-consumer</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <packaging>war</packaging>
  <name>points-consumer</name>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <spring.version>4.3.3.RELEASE</spring.version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-api</artifactId>
      <version>1.7.5</version>
    </dependency>
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-log4j12</artifactId>
      <version>1.7.5</version>
    </dependency>
    <dependency>
      <groupId>org.scala-lang</groupId>
      <artifactId>scala-library</artifactId>
      <version>2.9.2</version>
    </dependency>
    <dependency>
      <groupId>com.101tec</groupId>
      <artifactId>zkclient</artifactId>
      <version>0.3</version>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.9.2</artifactId>
      <version>0.8.1.1</version>
    </dependency>
    <dependency>
      <groupId>org.apache.zookeeper</groupId>
      <artifactId>zookeeper</artifactId>
      <version>3.4.5</version>
    </dependency>
    <dependency>
      <groupId>com.yammer.metrics</groupId>
      <artifactId>metrics-core</artifactId>
      <version>2.2.0</version>
    </dependency>
    <dependency>
      <groupId>commons-logging</groupId>
      <artifactId>commons-logging</artifactId>
      <version>1.2</version>
    </dependency>
    <dependency>
      <groupId>log4j</groupId>
      <artifactId>log4j</artifactId>
      <version>1.2.17</version>
    </dependency>
    <dependency>
      <groupId>commons-lang</groupId>
      <artifactId>commons-lang</artifactId>
      <version>2.6</version>
    </dependency>
  </dependencies>

  <build>
    <finalName>kafka-consumer</finalName>
    <plugins>
      <plugin>
        <artifactId>maven-compiler-plugin</artifactId>
        <configuration>
          <source>1.7</source>
          <target>1.7</target>
          <encoding>UTF-8</encoding>
        </configuration>
      </plugin>
      <plugin>
        <groupId>org.eclipse.jetty</groupId>
        <artifactId>jetty-maven-plugin</artifactId>
        <version>9.2.11.v20150529</version>
        <configuration>
          <jvmArgs>-Xms456m -Xmx456m -XX:MaxNewSize=456m -XX:MaxPermSize=1024m</jvmArgs>
          <scanIntervalSeconds>10</scanIntervalSeconds>
          <webApp>
            <contextPath>/</contextPath>
          </webApp>
          <httpConnector>
            <port>9090</port>
          </httpConnector>
        </configuration>
      </plugin>
    </plugins>
  </build>
</project>



2. Producer

package com.qianbao.kafka;

import java.util.Date;
import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class KafkaProducer {

    private final Producer<String, String> producer;

    private KafkaProducer() {
        Properties props = new Properties();
        props.put("metadata.broker.list", Utils.KAFKA_BROKER_LIST);
        // props.put("zk.connect", "");
        // props.put("zookeeper.connect", "");
        // serializer class for message values
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // serializer class for message keys
        props.put("key.serializer.class", "kafka.serializer.StringEncoder");
        // -1: wait for acknowledgement from all in-sync replicas
        props.put("request.required.acks", "-1");
        // custom partitioner that chooses the target partition
        props.put("partitioner.class", "com.qianbao.kafka.RoutePartition");
        producer = new Producer<String, String>(new ProducerConfig(props));
    }

    void produce(int messageNo, int count) {
        while (messageNo < count) {
            String key = String.valueOf(messageNo);
            String data = "hello kafka message " + key;
            producer.send(new KeyedMessage<String, String>(Utils.KAFKA_TOPIC_NAME, key, data));
            messageNo++;
        }
    }

    public static void main(String[] args) {
        int messageNo = 1000;
        int count = 1010;
        long startTime = new Date().getTime();
        KafkaProducer kafkaProducer = new KafkaProducer();
        kafkaProducer.produce(messageNo, count);
        long endTime = new Date().getTime();
        System.out.println("spend time:" + (endTime - startTime));
    }
}

3. Consumer

package com.qianbao.kafka;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

public class KafkaConsumer {

    private final ConsumerConnector consumer;
    private ExecutorService threadPool;

    private KafkaConsumer() {
        Properties props = new Properties();
        // ZooKeeper connection string
        props.put("zookeeper.connect", Utils.KAFKA_ZOOKEEPER);
        // group.id identifies the consumer group this instance belongs to
        props.put("group.id", Utils.KAFKA_CONSUMER_GROUP_ID);
        // ZooKeeper session timeout
        props.put("zookeeper.session.timeout.ms", "40000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        props.put("auto.offset.reset", "smallest");
        // serializer class
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        ConsumerConfig config = new ConsumerConfig(props);
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
    }

    void consume() {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(Utils.KAFKA_TOPIC_NAME, Utils.KAFKA_TOPIC_PARTITION_NUM);
        // topicCountMap.put(KafkaProducer2.TOPIC, new Integer(1));
        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
        Map<String, List<KafkaStream<String, String>>> consumerMap =
                consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
        List<KafkaStream<String, String>> partitions = consumerMap.get(Utils.KAFKA_TOPIC_NAME);
        threadPool = Executors.newFixedThreadPool(Utils.KAFKA_TOPIC_PARTITION_NUM);
        int partitionNum = 0;
        // one reader thread per stream/partition
        for (KafkaStream<String, String> partition : partitions) {
            threadPool.execute(new MessageReader(partition, partitionNum));
            partitionNum++;
        }
    }

    class MessageReader implements Runnable {
        private KafkaStream<String, String> partition;
        private int partitionNum;

        MessageReader(KafkaStream<String, String> partition, int partitionNum) {
            this.partition = partition;
            this.partitionNum = partitionNum;
        }

        public void run() {
            ConsumerIterator<String, String> it = partition.iterator();
            while (true) {
                if (it.hasNext()) {
                    String oneLineLog = it.next().message();
                    System.out.println(partitionNum + " partition: " + oneLineLog);
                } else {
                    try {
                        Thread.sleep(3000);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                }
            }
        }
    }

    public static void main(String[] args) {
        new KafkaConsumer().consume();
    }
}

4. Custom partitioner

package com.qianbao.kafka;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

public class RoutePartition implements Partitioner {

    // the constructor taking VerifiableProperties is required so Kafka can instantiate the class
    public RoutePartition(VerifiableProperties props) {
    }

    public int partition(Object key, int numPartitions) {
        // for demonstration, route every message to partition 1
        return 1;
    }
}
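RoutePartition above pins every message to partition 1, which is fine for a demo but leaves the other partitions idle. For comparison, a partitioner that spreads messages by key hash against the same kafka.producer.Partitioner interface might look like the sketch below. The class name HashPartitioner is illustrative and not part of the original example; to use it you would point partitioner.class at it instead of RoutePartition.

package com.qianbao.kafka;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

// Illustrative alternative: distribute messages across partitions by key hash.
public class HashPartitioner implements Partitioner {

    public HashPartitioner(VerifiableProperties props) {
    }

    public int partition(Object key, int numPartitions) {
        if (key == null) {
            return 0;
        }
        // mask the sign bit so the result is always a valid partition index
        return (key.hashCode() & 0x7fffffff) % numPartitions;
    }
}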

5. Utility class (static constants)

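The producer, consumer, and partitioner above all read their settings from a Utils constants class. A minimal sketch is shown below; the constant names match the references in the code above, but the broker list, ZooKeeper address, topic name, group id, and partition count are placeholder values (assumptions) to be replaced with your own cluster settings.

package com.qianbao.kafka;

// Static constants used by KafkaProducer, KafkaConsumer and RoutePartition.
// All values below are placeholders; adjust them to your environment.
public class Utils {

    // Kafka broker list, e.g. "host1:9092,host2:9092"
    public static final String KAFKA_BROKER_LIST = "localhost:9092";

    // ZooKeeper connection string used by the high-level consumer
    public static final String KAFKA_ZOOKEEPER = "localhost:2181";

    // Topic that the producer writes to and the consumer reads from
    public static final String KAFKA_TOPIC_NAME = "test-topic";

    // Consumer group id
    public static final String KAFKA_CONSUMER_GROUP_ID = "test-group";

    // Number of partitions of the topic (one consumer thread per partition)
    public static final int KAFKA_TOPIC_PARTITION_NUM = 3;
}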

