STORM入门之(集成KafkaSpout)
来源:互联网 发布:mysql转换日期格式 编辑:程序博客网 时间:2024/05/18 01:21
此篇基于原有两篇文章基础上扩展
STORM入门之(集成KafkaBolt) 传送门:http://blog.csdn.net/yl3395017/article/details/77452604
KafkaSpout更新
主要是构建KafkaSpout基本配置操作
/** * 构建KafkaSpout */ private static void builtKafkaSpout(TopologyBuilder builder) { try { BrokerHosts brokerHosts = new ZkHosts("10.2.4.12:2181,10.2.4.13:2181,10.2.4.14:2181"); SpoutConfig spoutConf = new SpoutConfig(brokerHosts, "test_rce_yjd", "/storm/data/2016122615", "logFramework"); spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme()); spoutConf.zkPort = 2181; spoutConf.ignoreZkOffsets = false; // 每次消费都从头开始,用于性能测试 List<String> servers = new ArrayList<String>(); String ZK_SERVERS = "10.2.4.12,10.2.4.13,10.2.4.14"; if (ZK_SERVERS != null) { String[] arr = ZK_SERVERS.split(","); for (int i = 0; i < arr.length; i++) { if (!("".equals(arr[i]))) { servers.add(arr[i]); } } } spoutConf.zkServers = servers; builder.setSpout("kafka-spout", new KafkaSpout(spoutConf),1); } catch (Exception e) { } }
Topology更新
构建Spout
package com.storm.topology;

import com.storm.bolt.BoltA;
import com.storm.bolt.KafkaMsgBolt;
import com.storm.spout.SpoutA;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.kafka.*;
import org.apache.storm.kafka.bolt.KafkaBolt;
import org.apache.storm.kafka.bolt.mapper.FieldNameBasedTupleToKafkaMapper;
import org.apache.storm.kafka.bolt.selector.DefaultTopicSelector;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.utils.Utils;

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

/**
 * Demo Storm topology showing two alternative pipelines selected by the
 * {@link #kafka} flag: a Kafka-fed pipeline (KafkaSpout -> BoltA) or a plain
 * one (SpoutA -> BoltA -> KafkaBolt).
 *
 * Created with IntelliJ IDEA. User: Administrator, Date: 17-8-21, Time: 上午10:54.
 */
public class Topology {

    // true: consume from Kafka via KafkaSpout; false: use the plain SpoutA pipeline.
    public static boolean kafka = true;

    public static void main(String args[])
            throws AuthorizationException, AlreadyAliveException, InvalidTopologyException {
        TopologyBuilder builder = new TopologyBuilder();
        if (kafka) {
            // KafkaSpout pipeline: kafka-spout -> BoltA.
            builtKafkaSpout(builder);
            builder.setBolt("BoltA", new BoltA(), 1).shuffleGrouping("kafka-spout");
        } else {
            // Plain spout pipeline: SpoutA -> BoltA.
            builder.setSpout("SpoutA", new SpoutA(), 1);
            builder.setBolt("BoltA", new BoltA(), 1).shuffleGrouping("SpoutA");
            // Append the KafkaBolt stage that forwards results to Kafka.
            builtKafkaBolt(builder);
        }
        Config conf = new Config();
        conf.setDebug(true);
        if (args != null && args.length > 0) {
            // Remote submission: first CLI argument is the topology name.
            conf.setNumWorkers(3);
            StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
        } else {
            // No args: run in-process on a local cluster for development.
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("soc", conf, builder.createTopology());
        }
    }
整体代码
package com.storm.topology;

import com.storm.bolt.BoltA;
import com.storm.bolt.KafkaMsgBolt;
import com.storm.spout.SpoutA;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.kafka.*;
import org.apache.storm.kafka.bolt.KafkaBolt;
import org.apache.storm.kafka.bolt.mapper.FieldNameBasedTupleToKafkaMapper;
import org.apache.storm.kafka.bolt.selector.DefaultTopicSelector;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.utils.Utils;

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

/**
 * Demo Storm topology with two alternative pipelines selected by {@link #kafka}:
 *
 * <ul>
 *   <li>{@code kafka == true}: KafkaSpout ("kafka-spout") -> BoltA</li>
 *   <li>{@code kafka == false}: SpoutA -> BoltA -> KafkaMsgBolt -> KafkaBolt (producer)</li>
 * </ul>
 *
 * With CLI arguments the topology is submitted to a remote cluster under the name
 * given as {@code args[0]}; without arguments it runs on an in-process LocalCluster.
 */
public class Topology {

    /** true: consume from Kafka via KafkaSpout; false: use the plain SpoutA pipeline. */
    public static boolean kafka = true;

    public static void main(String[] args)
            throws AuthorizationException, AlreadyAliveException, InvalidTopologyException {
        TopologyBuilder builder = new TopologyBuilder();
        if (kafka) {
            // Kafka-fed pipeline: kafka-spout -> BoltA.
            builtKafkaSpout(builder);
            builder.setBolt("BoltA", new BoltA(), 1).shuffleGrouping("kafka-spout");
        } else {
            // Plain pipeline: SpoutA -> BoltA, then forward results to Kafka.
            builder.setSpout("SpoutA", new SpoutA(), 1);
            builder.setBolt("BoltA", new BoltA(), 1).shuffleGrouping("SpoutA");
            builtKafkaBolt(builder);
        }

        Config conf = new Config();
        conf.setDebug(true);
        if (args != null && args.length > 0) {
            // Remote submission: first CLI argument is the topology name.
            conf.setNumWorkers(3);
            StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
        } else {
            // No args: run in-process for development. The cluster is intentionally
            // left running; re-enable the lines below to auto-kill after a test window.
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("soc", conf, builder.createTopology());
            // Utils.sleep(20000);
            // cluster.killTopology("soc");
            // cluster.shutdown();
        }
    }

    /**
     * Builds the Kafka producer stage and appends it to the topology:
     * BoltA -> KafkaMsgBolt ("msgSentenceBolt") -> KafkaBolt ("forwardToKafka").
     *
     * Producer properties used:
     * <pre>
     * metadata.broker.list = 10.2.4.13:9092,10.2.4.14:9092,10.2.4.12:9092
     * bootstrap.servers    = 10.2.4.13:9092,10.2.4.14:9092,10.2.4.12:9092
     * producer.type        = async
     * request.required.acks= 1
     * serializer.class     = kafka.serializer.StringEncoder
     * key/value.serializer = org.apache.kafka.common.serialization.StringSerializer
     * sendtopic            = test_rce_yjd
     * </pre>
     *
     * @param builder the topology builder the bolts are added to
     */
    private static void builtKafkaBolt(TopologyBuilder builder) {
        // Kafka producer configuration (old- and new-producer keys both supplied).
        Properties prop = new Properties();
        prop.put("metadata.broker.list", "10.2.4.13:9092,10.2.4.14:9092,10.2.4.12:9092");
        prop.put("bootstrap.servers", "10.2.4.13:9092,10.2.4.14:9092,10.2.4.12:9092");
        prop.put("producer.type", "async");
        prop.put("request.required.acks", "1");
        prop.put("serializer.class", "kafka.serializer.StringEncoder");
        prop.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        prop.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // Typed KafkaBolt (the with* methods return this, so the calls chain).
        KafkaBolt<String, String> bolt = new KafkaBolt<String, String>()
                .withTopicSelector(new DefaultTopicSelector("test_rce_yjd"))
                .withProducerProperties(prop)
                .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<String, String>());

        // Adapt BoltA's output into (key, message) tuples for the mapper.
        builder.setBolt("msgSentenceBolt", new KafkaMsgBolt()).shuffleGrouping("BoltA");
        // Publish the adapted tuples to Kafka.
        builder.setBolt("forwardToKafka", bolt).shuffleGrouping("msgSentenceBolt");
    }

    /**
     * Builds a KafkaSpout reading topic {@code test_rce_yjd} and registers it on the
     * topology under the id {@code "kafka-spout"}.
     *
     * @param builder the topology builder the spout is added to
     * @throws IllegalStateException if the spout cannot be configured
     */
    private static void builtKafkaSpout(TopologyBuilder builder) {
        try {
            // ZooKeeper ensemble used to discover Kafka broker/partition metadata.
            BrokerHosts brokerHosts = new ZkHosts("10.2.4.12:2181,10.2.4.13:2181,10.2.4.14:2181");
            // Args: broker hosts, topic, ZK root where offsets are stored, consumer id.
            SpoutConfig spoutConf =
                    new SpoutConfig(brokerHosts, "test_rce_yjd", "/storm/data/2016122615", "logFramework");
            // Emit each Kafka message as a single string field.
            spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());
            spoutConf.zkPort = 2181;
            // false = resume from the offsets previously committed to ZooKeeper.
            // Set to true to re-read the topic from the beginning (e.g. for performance tests).
            spoutConf.ignoreZkOffsets = false;
            // Host list for offset storage; ports are configured separately via zkPort.
            List<String> servers = new ArrayList<String>();
            String zkHostList = "10.2.4.12,10.2.4.13,10.2.4.14";
            for (String host : zkHostList.split(",")) {
                if (!host.isEmpty()) {
                    servers.add(host);
                }
            }
            spoutConf.zkServers = servers;
            builder.setSpout("kafka-spout", new KafkaSpout(spoutConf), 1);
        } catch (Exception e) {
            // Fail fast instead of silently leaving the topology without its spout:
            // BoltA groups on "kafka-spout" and would fail obscurely later otherwise.
            throw new IllegalStateException("Failed to build KafkaSpout", e);
        }
    }
}
结果
阅读全文
1 0
- STORM入门之(集成KafkaSpout)
- (五)storm-kafka源码走读之KafkaSpout
- kafkaspout+storm
- STORM入门之(集成KafkaBolt)
- STORM入门之(集成ElasticSearch)
- STORM入门之(集成Redis)
- STORM入门之(TridentTopology集成Kafka)
- STORM入门之(集成HDFS)
- (三)storm-kafka源码走读之如何构建一个KafkaSpout
- STORM入门之(Flume Kafka集成架构)
- storm的kafkaSpout实例
- 解析storm的KafkaSpout
- Kafka—Storm之KafkaSpout和KafkaBolt源码解释
- KafkaSpout之PartitionManager
- 【配置】Storm和Kafka的对接:KafkaSpout
- STORM入门之(基本Shell命令)
- STORM入门之(Topology简易Demo)
- STORM入门之(TridentAPI,Each)
- bootstrap的datepicker在选择日期后调用某个方法
- Java VisualVM 插件地址
- android加载大图滑动浏览OOM异常解决
- Java并发解决方案 java.util.concurrent
- 【清北学堂】dwarf
- STORM入门之(集成KafkaSpout)
- Object.create()详解
- linux平台下防火墙iptables原理
- SpringMVC配置全局日期转换器
- 【POJ-2407】Relatives
- @Resource和@Autowired区别
- Q:JButton的setText()方法设置按钮文本,结果按钮上显示的全是"..."
- Spring事务管理(详解+实例)
- log4j异常信息单独保存 log4j包名缩写