使用kafka-clients api操作Kafka
来源:互联网 发布:淘宝莆田鞋 编辑:程序博客网 时间:2024/06/04 18:26
引入kafka-clients相关依赖
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>0.9.0.1</version>
<exclusions>
<exclusion>
<artifactId>jmxri</artifactId>
<groupId>com.sun.jmx</groupId>
</exclusion>
<exclusion>
<artifactId>jms</artifactId>
<groupId>javax.jms</groupId>
</exclusion>
<exclusion>
<artifactId>jmxtools</artifactId>
<groupId>com.sun.jdmk</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>0.9.0.1</version>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>3.4.5</version>
<exclusions>
<exclusion>
<groupId>com.sun.jmx</groupId>
<artifactId>jmxri</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jdmk</groupId>
<artifactId>jmxtools</artifactId>
</exclusion>
<exclusion>
<groupId>javax.jms</groupId>
<artifactId>jms</artifactId>
</exclusion>
</exclusions>
</dependency>
消息生产者
package com._656463.demo.kafka.simple;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Properties;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
/**
* 简单的消息生产者
*/
/**
 * Minimal producer demo built on the legacy Scala-backed producer API
 * ({@code kafka.javaapi.producer.Producer}, pre-0.8.2 style).
 *
 * <p>Sends ten timestamped string messages to topic "test1", keyed by the
 * loop index so that messages sharing a key land in the same partition.
 */
public class SimpleProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zk.connect", "master:2181,slave1:2181,slave2:2181");
        // Serializer class applied to both keys and message payloads.
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // Broker bootstrap list; list several brokers for availability.
        props.put("metadata.broker.list", "master:9092,slave1:9092,slave2:9092");
        // acks=1: the partition leader must acknowledge every send.
        props.put("request.required.acks", "1");
        props.put("num.partitions", "3");
        ProducerConfig config = new ProducerConfig(props);
        Producer<String, String> producer = new Producer<String, String>(config);
        // Hoisted out of the loop: SimpleDateFormat construction is costly and
        // the instance is safely reused from this single thread.
        SimpleDateFormat formatter = new SimpleDateFormat("yyyy年MM月dd日 HH:mm:ss SSS");
        try {
            for (int i = 0; i < 10; i++) {
                String timestamp = formatter.format(new Date(System.currentTimeMillis()));
                String msg = "test" + i + "=" + timestamp;
                String key = String.valueOf(i);
                // KeyedMessage<K, V>: K is the partition-key type, V the payload type.
                producer.send(new KeyedMessage<String, String>("test1", key, msg));
            }
        } finally {
            // Release sockets and buffers; the original leaked the producer.
            producer.close();
        }
    }
}
消息消费者
package com._656463.demo.kafka.simple;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
/**
* 消息消费端
*/
/**
 * Minimal consumer demo built on the legacy high-level consumer API
 * ({@code kafka.javaapi.consumer.ConsumerConnector}).
 *
 * <p>Subscribes one stream to the topic passed to the constructor and prints
 * each received message, pausing one second between messages.
 */
public class SimpleConsumer extends Thread {
    private final ConsumerConnector consumer;
    private final String topic;

    public static void main(String[] args) {
        SimpleConsumer consumerThread = new SimpleConsumer("test1");
        consumerThread.start();
    }

    public SimpleConsumer(String topic) {
        consumer = Consumer.createJavaConsumerConnector(createConsumerConfig());
        this.topic = topic;
    }

    /** Builds the ZooKeeper-based configuration used by the old high-level consumer. */
    private static ConsumerConfig createConsumerConfig() {
        Properties props = new Properties();
        // ZooKeeper quorum: the legacy consumer coordinates and stores offsets here.
        props.put("zookeeper.connect", "master:2181,slave1:2181,slave2:2181");
        // Consumer group id.
        props.put("group.id", "1");
        // Offsets in ZooKeeper are not updated in real time; they are flushed
        // at this commit interval.
        props.put("auto.commit.interval.ms", "1000");
        props.put("zookeeper.session.timeout.ms", "10000");
        return new ConsumerConfig(props);
    }

    @Override
    public void run() {
        // topic -> number of consumer threads (streams) to create for it.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, 1);
        Map<String, List<KafkaStream<byte[], byte[]>>> streamMap =
                consumer.createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> stream = streamMap.get(topic).get(0);
        ConsumerIterator<byte[], byte[]> it = stream.iterator();
        System.out.println("*********Results********");
        try {
            while (it.hasNext()) {
                System.err.println("get data:" + new String(it.next().message()));
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and stop consuming rather than
                    // swallowing the interruption (the original only printed it).
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        } finally {
            // Leave the group and close ZooKeeper/broker connections; the
            // original never shut the connector down.
            consumer.shutdown();
        }
    }
}
运行程序可以看到,刚好10条信息,没有丢失。不过由于负载均衡的原因,消息并非整体有序:Kafka只保证分区内部的有序性,不保证跨partition的顺序。结合按Key分partition的能力,每个分区内的有序性对大多数应用已经够用了。
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>0.9.0.1</version>
<exclusions>
<exclusion>
<artifactId>jmxri</artifactId>
<groupId>com.sun.jmx</groupId>
</exclusion>
<exclusion>
<artifactId>jms</artifactId>
<groupId>javax.jms</groupId>
</exclusion>
<exclusion>
<artifactId>jmxtools</artifactId>
<groupId>com.sun.jdmk</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>0.9.0.1</version>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>3.4.5</version>
<exclusions>
<exclusion>
<groupId>com.sun.jmx</groupId>
<artifactId>jmxri</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jdmk</groupId>
<artifactId>jmxtools</artifactId>
</exclusion>
<exclusion>
<groupId>javax.jms</groupId>
<artifactId>jms</artifactId>
</exclusion>
</exclusions>
</dependency>
消息生产者
package com._656463.demo.kafka.simple;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Properties;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
/**
* 简单的消息生产者
*/
/**
 * Minimal producer demo built on the legacy Scala-backed producer API
 * ({@code kafka.javaapi.producer.Producer}, pre-0.8.2 style).
 *
 * <p>Sends ten timestamped string messages to topic "test1", keyed by the
 * loop index so that messages sharing a key land in the same partition.
 */
public class SimpleProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zk.connect", "master:2181,slave1:2181,slave2:2181");
        // Serializer class applied to both keys and message payloads.
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // Broker bootstrap list; list several brokers for availability.
        props.put("metadata.broker.list", "master:9092,slave1:9092,slave2:9092");
        // acks=1: the partition leader must acknowledge every send.
        props.put("request.required.acks", "1");
        props.put("num.partitions", "3");
        ProducerConfig config = new ProducerConfig(props);
        Producer<String, String> producer = new Producer<String, String>(config);
        // Hoisted out of the loop: SimpleDateFormat construction is costly and
        // the instance is safely reused from this single thread.
        SimpleDateFormat formatter = new SimpleDateFormat("yyyy年MM月dd日 HH:mm:ss SSS");
        try {
            for (int i = 0; i < 10; i++) {
                String timestamp = formatter.format(new Date(System.currentTimeMillis()));
                String msg = "test" + i + "=" + timestamp;
                String key = String.valueOf(i);
                // KeyedMessage<K, V>: K is the partition-key type, V the payload type.
                producer.send(new KeyedMessage<String, String>("test1", key, msg));
            }
        } finally {
            // Release sockets and buffers; the original leaked the producer.
            producer.close();
        }
    }
}
消息消费者
package com._656463.demo.kafka.simple;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
/**
* 消息消费端
*/
/**
 * Minimal consumer demo built on the legacy high-level consumer API
 * ({@code kafka.javaapi.consumer.ConsumerConnector}).
 *
 * <p>Subscribes one stream to the topic passed to the constructor and prints
 * each received message, pausing one second between messages.
 */
public class SimpleConsumer extends Thread {
    private final ConsumerConnector consumer;
    private final String topic;

    public static void main(String[] args) {
        SimpleConsumer consumerThread = new SimpleConsumer("test1");
        consumerThread.start();
    }

    public SimpleConsumer(String topic) {
        consumer = Consumer.createJavaConsumerConnector(createConsumerConfig());
        this.topic = topic;
    }

    /** Builds the ZooKeeper-based configuration used by the old high-level consumer. */
    private static ConsumerConfig createConsumerConfig() {
        Properties props = new Properties();
        // ZooKeeper quorum: the legacy consumer coordinates and stores offsets here.
        props.put("zookeeper.connect", "master:2181,slave1:2181,slave2:2181");
        // Consumer group id.
        props.put("group.id", "1");
        // Offsets in ZooKeeper are not updated in real time; they are flushed
        // at this commit interval.
        props.put("auto.commit.interval.ms", "1000");
        props.put("zookeeper.session.timeout.ms", "10000");
        return new ConsumerConfig(props);
    }

    @Override
    public void run() {
        // topic -> number of consumer threads (streams) to create for it.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, 1);
        Map<String, List<KafkaStream<byte[], byte[]>>> streamMap =
                consumer.createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> stream = streamMap.get(topic).get(0);
        ConsumerIterator<byte[], byte[]> it = stream.iterator();
        System.out.println("*********Results********");
        try {
            while (it.hasNext()) {
                System.err.println("get data:" + new String(it.next().message()));
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and stop consuming rather than
                    // swallowing the interruption (the original only printed it).
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        } finally {
            // Leave the group and close ZooKeeper/broker connections; the
            // original never shut the connector down.
            consumer.shutdown();
        }
    }
}
运行程序可以看到,刚好10条信息,没有丢失。不过由于负载均衡的原因,消息并非整体有序:Kafka只保证分区内部的有序性,不保证跨partition的顺序。结合按Key分partition的能力,每个分区内的有序性对大多数应用已经够用了。
0 0
- 使用kafka-clients api操作Kafka
- kafka kafka-clients 0.10.0.0 API
- 使用spring-kafka操作kafka
- Java Api操作Kafka
- Kafka API操作
- kafka Producer API使用
- 使用 Java 操作 Kafka
- Kafka之Java API操作
- Kafka Java API操作topic
- Kafka Java API操作topic
- Kafka Java API操作topic
- kafka-clients 0.10 消息消费者
- kafka-clients 0.10 消息生产者
- 使用spring-integration-kafka操作kafka
- kafka HighLevelConsumer API 使用案例
- Kafka Client API 基本使用
- kafka 0.8 simple api使用
- [Kafka]为什么使用kafka?
- 蠕虫爬井问题(HDOJ1.2.1)
- Git 忽略一些文件不加入版本控制
- solr界面和查询参数注解
- test
- ubuntu10.04可用源
- 使用kafka-clients api操作Kafka
- Java中的ThreadPoolExecutor
- 远程服务器返回错误: (401)未经授权的解决办法
- C#程序将124.56转换成壹佰贰拾肆元伍角六分
- POJ 3273-Monthly Expense(二分法-最小化最高花费)
- [Android新手学习笔记09]-活动Activity启动模式
- [leetcode-二叉树层次遍历并统计每层节点数]--103. Binary Tree Zigzag Level Order Traversal
- 将youku的视频放到自己的网站上面播放
- Rendering views on the screen