kafka helloworld

来源:互联网 发布:美国财经数据 编辑:程序博客网 时间:2024/06/05 11:06

贴一下我根据官网的例子写的 kafka 的 hello world 例子,完整的代码如下:

pom.xml如下

<!-- Minimal Maven build for the kafka hello-world demo: one dependency, kafka-clients 0.9.0.0.
     FIX: the original had no whitespace between xmlns:xsi="…" and xsi:schemaLocation,
     which makes the XML not well-formed. -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>com.demo</groupId>
  <artifactId>kafak-demo</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <packaging>jar</packaging>

  <name>kafak-demo</name>
  <url>http://maven.apache.org</url>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
  </properties>

  <dependencies>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka-clients</artifactId>
      <version>0.9.0.0</version>
    </dependency>
  </dependencies>
</project>

生产者代码

package com.fosun.kafak_demo.main;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

import com.fosun.kafak_demo.util.Constants;

/**
 * Minimal Kafka producer demo: publishes String messages ("hello" + i) to
 * {@code Constants.TOPIC}, spreading them across 4 partitions via {@code i % 4},
 * at roughly 10 messages per second.
 */
public class KafkaProducerTest {

    /** First message index — resumes where an earlier run stopped. */
    private static final int START_INDEX = 3844;
    /** Exclusive upper bound on message indices. */
    private static final int END_INDEX = 1_000_000;

    private KafkaProducerTest() {
    }

    void produce() {
        Properties props = new Properties();
        // FIX: corrected "dmeo-hadoop1" typo so both demos point at the same cluster.
        props.put("bootstrap.servers", "demo-hadoop1.fx01:9092,demo-hadoop2.fx01:9092");
        props.put("acks", "all");           // wait for the full ISR to acknowledge each record
        props.put("retries", 0);
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // FIX: try-with-resources guarantees close() (which flushes buffered records)
        // even when send() throws; the original leaked the producer on failure.
        try (Producer<String, String> producer = new KafkaProducer<String, String>(props)) {
            for (int i = START_INDEX; i < END_INDEX; i++) {
                System.out.printf("produce %d message\n", i);
                // Key and value carry the same payload; partition = i % 4.
                producer.send(new ProducerRecord<String, String>(
                        Constants.TOPIC, i % 4, "hello" + i, "hello" + i));
                try {
                    Thread.sleep(100); // throttle to ~10 msg/s
                } catch (InterruptedException e) {
                    // FIX: the original swallowed the interrupt. Restore the flag
                    // and stop producing so the thread can shut down promptly.
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        }
    }

    public static void main(String[] args) {
        new KafkaProducerTest().produce();
    }
}
消费者代码
package com.fosun.kafak_demo.main;

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import com.fosun.kafak_demo.util.Constants;

/**
 * Minimal Kafka consumer demo: manually assigns partition 0 of
 * {@code Constants.TOPIC} and prints each record's offset, key and value.
 */
public class KafkaConsumerTest {

    private KafkaConsumerTest() {
    }

    void consume() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "demo-hadoop1.fx01:9092,demo-hadoop2.fx01:9092");
        props.put("group.id", "testsssss");
        props.put("enable.auto.commit", "false");
        props.put("auto.commit.interval.ms", "1000"); // ignored while auto-commit is off
        props.put("auto.offset.reset", "earliest");   // start from the beginning when no committed offset exists
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        try {
            // Alternative: group-managed subscription to the whole topic —
            // consumer.subscribe(Arrays.asList(Constants.TOPIC));

            // Manually assign partition 0 only (no group rebalancing).
            TopicPartition partition0 = new TopicPartition(Constants.TOPIC, 0);
            consumer.assign(Arrays.asList(partition0));

            // FIX: the original looped on a `flag` that was never cleared, making
            // close() unreachable; exit cooperatively on thread interruption instead.
            while (!Thread.currentThread().isInterrupted()) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s\n",
                            record.offset(), record.key(), record.value());
                }
                // FIX: auto-commit is disabled, but the original never committed —
                // every restart reprocessed the partition from `earliest`. Commit
                // the processed offsets once per poll batch.
                consumer.commitSync();
            }
        } finally {
            consumer.close(); // always release the network/socket resources
        }
    }

    public static void main(String[] args) {
        new KafkaConsumerTest().consume();
    }
}


原创粉丝点击