zookeeper+kafka+storm+flume+log4j


1. Modify the Flume configuration and restart the agent.

conf/log4j-kafka-flume.properties


# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1


# Describe/configure the source
a1.sources.r1.type = avro
a1.sources.r1.bind = 0.0.0.0
a1.sources.r1.port = 44446


# Describe the sink
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.brokerList = storm1:9092,storm2:9092,storm3:9092
a1.sinks.k1.topic = kafkatest
a1.sinks.k1.serializer.class = kafka.serializer.StringEncoder
a1.sinks.k1.producer.type = async


# Use a file channel which buffers events on disk
a1.channels.c1.type = file
# Directory where the checkpoint is written
a1.channels.c1.checkpointDir = /tmp/flume/channels/kafkatest/checkpoint
# Whether the channel backs up a checkpoint once it has been fully written (true/false); if true, backupCheckpointDir must be set
a1.channels.c1.useDualCheckpoints = true
# Directory for the backup checkpoint. If the primary checkpoint is corrupted or incomplete, the channel can recover from the backup and avoid a full replay of the data files; must differ from checkpointDir
a1.channels.c1.backupCheckpointDir = /tmp/flume/channels/kafkatest/backup
# Comma-separated list of directories in which event data is stored. Configuring several directories, each mounted on a different disk, can significantly improve performance through parallel writes
a1.channels.c1.dataDirs = /data1/flume/channels/kafkatest/data
# Maximum time (in seconds) to wait for each write or read to complete
#a1.channels.c1.keep-alive = 30
# Maximum size of a single data file, in bytes; once a file reaches this size it is closed and a new data file is created in that directory
a1.channels.c1.maxFileSize = 20000
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
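
The KafkaSink above publishes to the kafkatest topic. If the brokers do not auto-create topics, the topic can be created before starting the agent; a minimal sketch using the Kafka CLI shipped with the brokers (the replication factor and partition count are illustrative assumptions, not values from the original setup):

bin/kafka-topics.sh --create --zookeeper storm1:2181 --replication-factor 2 --partitions 3 --topic kafkatest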


bin/flume-ng agent --conf-file conf/log4j-kafka-flume.properties -c conf/ --name a1 -Dflume.root.logger=DEBUG,console



2. Create a Spring Boot project, springboot-log4j, in which Log4j sends logs directly to Flume.

(1) Create log4j.properties under the resources directory.

### set log levels ###
log4j.rootLogger=INFO, stdout, file, flume
log4j.logger.per.flume=INFO

### flume ###
log4j.appender.flume=org.apache.flume.clients.log4jappender.Log4jAppender
log4j.appender.flume.layout=org.apache.log4j.PatternLayout
log4j.appender.flume.Hostname=192.168.0.66
log4j.appender.flume.Port=44446
log4j.appender.flume.UnsafeMode=true

### stdout ###
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Threshold=INFO
log4j.appender.stdout.Target=System.out
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %c{1} [%p] %m%n

### file ###
log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
log4j.appender.file.Threshold=INFO
log4j.appender.file.File=./logs/tracker/tracker.log
log4j.appender.file.Append=true
log4j.appender.file.DatePattern='.'yyyy-MM-dd
log4j.appender.file.layout=org.apache.log4j.PatternLayout
log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %c{1} [%p] %m%n

(2) pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.whl.demo</groupId>
    <artifactId>flume-log4j</artifactId>
    <version>1.0-SNAPSHOT</version>
    <dependencies>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
            <version>1.7.10</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flume</groupId>
            <artifactId>flume-ng-core</artifactId>
            <version>1.6.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flume.flume-ng-clients</groupId>
            <artifactId>flume-ng-log4jappender</artifactId>
            <version>1.6.0</version>
        </dependency>
    </dependencies>
</project>


(3) Create the test class WriteLog.
package com.whl.demo;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Date;

public class WriteLog {
    protected static final Logger logger = LoggerFactory.getLogger(WriteLog.class);

    public static void main(String[] args) {
        logger.info(String.valueOf(new Date().getTime()));
        try {
            for (int i = 0; i < 10000; i++) {
                //Thread.sleep(2000);
                logger.info("id--test:" + i);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

(4) After running the code above, the messages can be seen in a Kafka consumer terminal.
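
For a quick check that the events reach Kafka, attach a console consumer to the topic; a sketch assuming the Kafka 0.9 command-line tools are available on one of the broker hosts:

bin/kafka-console-consumer.sh --zookeeper storm1:2181 --topic kafkatest --from-beginning
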
3. Create the springboot-storm project, in which Storm processes the messages in real time.
(1) pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.whl.demo</groupId>
    <artifactId>storm-demo</artifactId>
    <version>1.0-SNAPSHOT</version>
    <packaging>jar</packaging>
    <name>storm-demo</name>
    <url>http://maven.apache.org</url>
    <repositories>
        <repository>
            <id>clojars.org</id>
            <url>http://clojars.org/repo</url>
        </repository>
    </repositories>
    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>
    <dependencies>
        <!-- storm -->
        <dependency>
            <groupId>org.apache.storm</groupId>
            <artifactId>storm-core</artifactId>
            <version>0.10.2</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.twitter4j</groupId>
            <artifactId>twitter4j-stream</artifactId>
            <version>3.0.3</version>
        </dependency>
        <dependency>
            <groupId>commons-collections</groupId>
            <artifactId>commons-collections</artifactId>
            <version>3.2.1</version>
        </dependency>
        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
            <version>13.0</version>
        </dependency>
        <!-- kafka -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.10</artifactId>
            <version>0.9.0.0</version>
            <!-- Required: exclude slf4j-log4j12, otherwise the conflicting logging jars prevent the topology from running -->
            <exclusions>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-log4j12</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <!-- storm-kafka -->
        <dependency>
            <groupId>org.apache.storm</groupId>
            <artifactId>storm-kafka</artifactId>
            <version>0.10.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>0.9.0.0</version>
        </dependency>
    </dependencies>
</project>

(2) The KafkaTopology class
package com.whl.demo.bolts;

import java.util.Arrays;

import storm.kafka.BrokerHosts;
import storm.kafka.KafkaSpout;
import storm.kafka.SpoutConfig;
import storm.kafka.StringScheme;
import storm.kafka.ZkHosts;
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.spout.SchemeAsMultiScheme;
import backtype.storm.topology.TopologyBuilder;

public class KafkaTopology {
    public static void main(String[] args) {
        try {
            System.out.println("=============start================");
            // Instantiate the TopologyBuilder.
            TopologyBuilder topologyBuilder = new TopologyBuilder();
            // Configure the Kafka spout; its parallelism controls the number of executor threads in the cluster.
            String zks = "storm1:2181,storm2:2181,storm3:2181";
            String topic = "kafkatest";
            String zkRoot = "/storm"; // default ZooKeeper root configuration for Storm
            String id = "word";
            BrokerHosts brokerHosts = new ZkHosts(zks);
            // The Kafka topic to subscribe to, plus the ZooKeeper root and consumer id used to store offsets.
            SpoutConfig spoutConf = new SpoutConfig(brokerHosts, topic, zkRoot, id);
            spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());
            //spoutConf.forceFromStart = true;
            spoutConf.zkServers = Arrays.asList(new String[] {"storm1", "storm2", "storm3"});
            spoutConf.zkPort = 2181;
            //spoutConf.forceFromStart = false;
            KafkaSpout receiver = new KafkaSpout(spoutConf);
            topologyBuilder.setSpout("kafka-spout", receiver, 5).setNumTasks(10);
            topologyBuilder.setBolt("kafka-bolt", new SimpleBolt(), 5).setNumTasks(10).shuffleGrouping("kafka-spout");
            //topologyBuilder.setBolt("kafka-hbase-bolt", new SimpleBolt2(), 5).setNumTasks(10).shuffleGrouping("kafka-bolt");
            Config config = new Config();
            config.setDebug(false);
            if (args != null && args.length > 0) {
                /* Number of worker slots this topology claims in the cluster; each slot corresponds to one
                   worker process on a supervisor node. If you request more slots than the cluster has free,
                   the topology may submit successfully but never actually run (for example, only 2 workers
                   free while 4 are requested); it starts once killing other topologies releases enough slots. */
                config.setNumWorkers(1);
                StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
            } else {
                // Startup code for local mode.
                config.setNumWorkers(2);
                config.setMaxTaskParallelism(1);
                LocalCluster cluster = new LocalCluster();
                cluster.submitTopology("simple", config, topologyBuilder.createTopology());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

(3) The SimpleBolt class

package com.whl.demo.bolts;

import java.util.Map;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SimpleBolt extends BaseRichBolt {
    private OutputCollector collector;
    protected static final Logger logger = LoggerFactory.getLogger(SimpleBolt.class);

    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("info", "id"));
    }

    @SuppressWarnings("rawtypes")
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    public void execute(Tuple input) {
        try {
            String mesg = input.getString(0);
            if (mesg != null) {
                collector.emit(new Values(mesg + "mesg is processed!", mesg));
                //System.out.println("Bolt" + this.hashCode() + ":" + mesg);
                logger.info("mesg:" + mesg);
            }
        } catch (Exception e) {
            e.printStackTrace();
            collector.fail(input);
            return; // do not ack a tuple that has already been failed
        }
        collector.ack(input);
    }
}
   


(4) Package the project as a jar and copy it to the Storm server.

bin/storm jar storm-demo-1.0-SNAPSHOT.jar com.whl.demo.bolts.KafkaTopology

Pitfall: the dependency jars from the Maven build must be placed in Storm's lib directory (storm-core itself is provided by the cluster). If the topology fails after submission, add the missing jars according to the error messages, as sketched below.
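
One way to do this, sketched under the assumption that Storm is installed at /opt/storm and that the standard Maven goal dependency:copy-dependencies is used to collect the jars into target/dependency; only the jars actually named in the ClassNotFoundException messages need to be copied:

mvn dependency:copy-dependencies
cp target/dependency/storm-kafka-0.10.2.jar \
   target/dependency/kafka_2.10-0.9.0.0.jar \
   target/dependency/kafka-clients-0.9.0.0.jar \
   target/dependency/scala-library-*.jar \
   /opt/storm/lib/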


(5) Run the first project (springboot-log4j); the corresponding output can then be seen in the console.
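
Note that in local mode the bolt's log lines appear directly in the console, while on a cluster they go to the worker logs on the supervisor nodes. A sketch for following them, assuming Storm is installed at /opt/storm and the topology's worker was assigned port 6700 (both assumptions; check your installation and the Storm UI for the actual port):

tail -f /opt/storm/logs/worker-6700.log | grep "mesg:"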




