rocketMQ集群部署

来源:互联网 发布:linux内核用在哪 编辑:程序博客网 时间:2024/05/22 08:18

rocketMQ集群部署

rocketMQ安装包下载

wget https://github.com/alibaba/RocketMQ/archive/v3.5.8.tar.gz

//注:到github下载为未编译的包

//需要自行编译:

解压:

tar -zxvf v3.5.8.tar.gz

到解压目录进行编译:

sh install.sh

编译需要的环境安装:

yum -y install maven git

编译成功后,拷贝target下面的编译包,即为安装包

或者直接下载别人编译好的安装包

解压安装包:

tar -zxvf alibaba-rocketmq-3.1.7.tar.gz

配置多Master多Slave模式,同步双写

#Broker所属哪个集群,默认【DefaultCluster】

brokerClusterName=Cluster12

#本机主机名

brokerName=broker-12

#BrokerId,必须是大于等于0的整数,0表示Master,>0表示Slave;一个Master可以挂多个Slave,Master与Slave通过BrokerName来配对,默认【0】

brokerId=1

#删除文件时间点,默认凌晨4点

deleteWhen=04

#文件保留时间,默认48小时

fileReservedTime=48

#Broker的角色 - ASYNC_MASTER 异步复制Master - SYNC_MASTER 同步双写Master - SLAVE

brokerRole=SLAVE

#刷盘方式 -ASYNC_FLUSH异步刷盘 - SYNC_FLUSH同步刷盘

flushDiskType=ASYNC_FLUSH

#Name Server地址

namesrvAddr=192.168.3.12:9876;192.168.3.13:9876

#Broker对外服务的监听端口,默认【10911】

listenPort=10900

defaultTopicQueueNums=4

#是否允许Broker自动创建Topic,建议线下开启,线上关闭,默认【true】

autoCreateTopicEnable=true

#是否允许Broker自动创建订阅组,建议线下开启,线上关闭,默认【true】

autoCreateSubscriptionGroup=true

mapedFileSizeCommitLog=1073741824

mapedFileSizeConsumeQueue=50000000

destroyMapedFileIntervalForcibly=120000

redeleteHangedFileInterval=120000

diskMaxUsedSpaceRatio=88

storePathRootDir=/usr/local/alibaba-rocketmq/data/store12

storePathCommitLog=/usr/local/alibaba-rocketmq/data/store12/commitlog

maxMessageSize=65536

flushCommitLogLeastPages=4

flushConsumeQueueLeastPages=2

flushCommitLogThoroughInterval=10000

flushConsumeQueueThoroughInterval=60000

checkTransactionMessageEnable=false

sendMessageThreadPoolNums=128

pullMessageThreadPoolNums=128

其他配置类似:

rocketMQ启动:

namesrv启动:

nohup sh mqnamesrv 1>$ROCKETMQ_HOME/log/ng.log 2>$ROCKETMQ_HOME/log/ng-error.log &

broker启动:

nohup sh mqbroker -n 192.168.1.102:9876 -c $ROCKETMQ_HOME/conf/2m-2s-async/broker-b.properties > $ROCKETMQ_HOME/log/mq.log &

 

 

Java测试demo:

producer:

packagecn.rmt.ap.rocketmq.util;

 

importjava.util.concurrent.TimeUnit;

 

import com.alibaba.rocketmq.client.exception.MQClientException;

importcom.alibaba.rocketmq.client.producer.LocalTransactionExecuter;

importcom.alibaba.rocketmq.client.producer.LocalTransactionState;

importcom.alibaba.rocketmq.client.producer.SendResult;

importcom.alibaba.rocketmq.client.producer.SendStatus;

importcom.alibaba.rocketmq.client.producer.TransactionCheckListener;

importcom.alibaba.rocketmq.client.producer.TransactionMQProducer;

importcom.alibaba.rocketmq.common.message.Message;

import com.alibaba.rocketmq.common.message.MessageExt;

 

public class Producer{

  

   private staticSendStatus sendMsg(TransactionMQProducer producer,String body){

       Messagemsg = new Message("TopicTest1",// topic

                "TagA",                        // tag

                "key1",                   // key消息关键词,多个KeyKEY_SEPARATOR隔开(查询消息使用)

                ( body).getBytes());   // body

       SendStatussendStatus = null;

      try {

         SendResult sendResult = producer.sendMessageInTransaction(

                msg, newLocalTransactionExecuter(){

                     publicLocalTransactionState executeLocalTransactionBranch(Messagemsg, Object arg) {

                         returnLocalTransactionState.COMMIT_MESSAGE;

                     }

                },

                "$$$");

         System.out.println(sendResult);

         sendStatus = sendResult.getSendStatus();

      } catch(MQClientExceptione) {

         // TODOAuto-generated catch block

         e.printStackTrace();

      }

      return sendStatus;

       

    }

  

   public static voidmain(String[] args) throws MQClientException,InterruptedException {

        /**

         * 一个应用创建一个Producer,由应用来维护此对象,可以设置为全局对象或者单例<br>

         * 注意:ProducerGroupName需要由应用来保证唯一,一类Producer集合的名称,这类Producer通常发送一类消息,

         * 且发送逻辑一致<br>

         * ProducerGroup这个概念发送普通的消息时,作用不大,但是发送分布式事务消息时,比较关键,

         * 因为服务器会回查这个Group下的任意一个Producer

         */

        finalTransactionMQProducerproducer = newTransactionMQProducer("ProducerGroupName");

        // nameserver服务

        producer.setNamesrvAddr("192.168.3.12:9876;192.168.3.13:9876");

        producer.setInstanceName("Producer");

        producer.setVipChannelEnabled(false);

       

        /**

         * Producer对象在使用之前必须要调用start初始化,初始化一次即可<br>

         * 注意:切记不可以在每次发送消息时,都调用start方法

         */

        producer.start();

        // 服务器回调Producer,检查本地事务分支成功还是失败

        producer.setTransactionCheckListener(

             newTransactionCheckListener() {

 

            publicLocalTransactionState checkLocalTransactionState(

                    MessageExt msg) {

                System.out.println("checkLocalTransactionState--" +new String(msg.getBody()));

                returnLocalTransactionState.COMMIT_MESSAGE;

            }

        });

       

 

        /**

         * 下面这段代码表明一个Producer对象可以发送多个topic,多个tag的消息。

         * 注意:send方法是同步调用,只要不抛异常就标识成功。但是发送成功也可会有多种状态,<br>

         * 例如消息写入Master成功,但是Slave不成功,这种情况消息属于成功,但是对于个别应用如果对消息可靠性要求极高,<br>

         * 需要对这种情况做处理。另外,消息可能会存在发送失败的情况,失败重试由应用来处理。

         */

 

        for (inti = 0; i < 10;i++) {

            try {

              SendStatussendStatus = Producer.sendMsg(producer, String.valueOf(i) );

              if(!sendStatus.equals(SendStatus.SEND_OK)){

//                  System.out.println(sendStatus.toString());

                 //发送失败处理

              }

               

 

//                {

//                    Message msg = newMessage("TopicTest1", // topic

//                           "TagB",                        // tag

//                           "key2",                 // key消息关键词,多个KeyKEY_SEPARATOR隔开(查询消息使用)

//                            ("HelloMetaQB").getBytes());   // body

//                    SendResult sendResult =producer.sendMessageInTransaction(

//                            msg, newLocalTransactionExecuter(){

//                                publicLocalTransactionState executeLocalTransactionBranch(Messagemsg, Object arg){

//                                    returnLocalTransactionState.COMMIT_MESSAGE;

//                                }

//                            },

//                            "$$$");

//                   System.out.println(i+" "+sendResult);

//                }

//

//                {

//                    Message msg = newMessage("TopicTest2", // topic

//                            "TagC",                         // tag

//                           "key3",                  // key

//                            ("HelloMetaQC").getBytes());   // body

//                    SendResult sendResult =producer.sendMessageInTransaction(

//                            msg, newLocalTransactionExecuter(){

//                                publicLocalTransactionState executeLocalTransactionBranch(Messagemsg, Object arg){

//                                    returnLocalTransactionState.COMMIT_MESSAGE;

//                                }

//                            },

//                            "$$$");

//                   System.out.println(i+" "+sendResult);

//                }

            } catch(Exceptione) {

                e.printStackTrace();

            }

            TimeUnit.MILLISECONDS.sleep(100);

        }

 

        /**

         * 应用退出时,要调用shutdown来清理资源,关闭网络连接,从MetaQ服务器上注销自己

         * 注意:我们建议应用在JBOSSTomcat等容器的退出钩子里调用shutdown方法

         */

        //producer.shutdown();

        Runtime.getRuntime().addShutdownHook(new Thread(newRunnable() {

            public void run() {

                producer.shutdown();

            }

        }));

        System.exit(0);

    } // 执行本地事务,由客户端回调

}

 

 

 

consumer pull方式:

packagecn.rmt.ap.rocketmq.util;

 

importjava.util.HashMap;

importjava.util.List;

importjava.util.Map;

importjava.util.Set;

 

importcom.alibaba.rocketmq.client.consumer.DefaultMQPullConsumer;

importcom.alibaba.rocketmq.client.consumer.PullResult;

importcom.alibaba.rocketmq.client.exception.MQClientException;

importcom.alibaba.rocketmq.common.message.MessageExt;

importcom.alibaba.rocketmq.common.message.MessageQueue;

 

public classPullConsumer {

   // Java缓存 

    private static finalMap<MessageQueue, Long>offseTable = newHashMap<MessageQueue, Long>(); 

 

    /**

     * 主动拉取方式消费

     * 

     * @throwsMQClientException

     */ 

    public static voidmain(String[] args) throws MQClientException{ 

        /**

         * 一个应用创建一个Consumer,由应用来维护此对象,可以设置为全局对象或者单例<br>

         * 注意:ConsumerGroupName需要由应用来保证唯一 ,最好使用服务的包名区分同一服务,一类Consumer集合的名称,

         * 这类Consumer通常消费一类消息,且消费逻辑一致

         * PullConsumerConsumer的一种,应用通常主动调用Consumer的拉取消息方法从Broker拉消息,主动权由应用控制

         */ 

        DefaultMQPullConsumer consumer = newDefaultMQPullConsumer("ConsumerGroupName"); 

        // //nameserver服务 

        consumer.setNamesrvAddr("192.168.3.12:9876;192.168.3.13:9876"); 

        consumer.setInstanceName("Consumber"); 

        consumer.setVipChannelEnabled(false);

        consumer.start();

 

        // 拉取订阅主题的队列,默认队列大小是

        Set<MessageQueue> mqs = consumer.fetchSubscribeMessageQueues("TopicTest2"); 

        for(MessageQueue mq : mqs) { 

            System.out.println("Consumefrom the queue: " +mq); 

            SINGLE_MQ: while (true) { 

                try

                    PullResult pullResult =consumer.pullBlockIfNotFound(mq,null, getMessageQueueOffset(mq),32); 

                    List<MessageExt> list =pullResult.getMsgFoundList(); 

                    if (list !=null&& list.size() < 100) { 

                        for(MessageExtmsg : list) {

                            System.out.println("keys:"+msg.getKeys()+"body:" +new String(msg.getBody())); 

                        } 

                    } 

                    System.out.println(pullResult.getNextBeginOffset());

                    putMessageQueueOffset(mq,pullResult.getNextBeginOffset()); 

                    switch (pullResult.getPullStatus()){

                        case FOUND

                            break

                        case NO_MATCHED_MSG

                            break

                        case NO_NEW_MSG

                            breakSINGLE_MQ; 

                        case OFFSET_ILLEGAL

                            break

                        default

                            break

                    } 

                } catch(Exceptione) { 

                    e.printStackTrace(); 

                } 

            } 

        } 

        consumer.shutdown(); 

    } 

 

    private static voidputMessageQueueOffset(MessageQueuemq, longoffset) { 

        offseTable.put(mq,offset); 

    } 

 

    private static longgetMessageQueueOffset(MessageQueuemq) { 

        Long offset = offseTable.get(mq); 

        if (offset !=null) { 

            System.out.println(offset); 

            return offset

        } 

        return 0; 

    } 

}

 

 

consumer push方式:

 

packagecn.rmt.ap.rocketmq.util;

 

importjava.util.List;

 

importcom.alibaba.rocketmq.client.consumer.DefaultMQPushConsumer;

importcom.alibaba.rocketmq.client.consumer.listener.ConsumeConcurrentlyContext;

importcom.alibaba.rocketmq.client.consumer.listener.ConsumeConcurrentlyStatus;

importcom.alibaba.rocketmq.client.consumer.listener.MessageListenerConcurrently;

importcom.alibaba.rocketmq.common.consumer.ConsumeFromWhere;

importcom.alibaba.rocketmq.common.message.MessageExt;

 

public classPushConsumer {

   public static voidmain(String[] args) { 

        DefaultMQPushConsumer consumer=newDefaultMQPushConsumer("ConsumerGroupName");

        consumer.setNamesrvAddr("192.168.3.12:9876;192.168.3.13:9876");

        consumer.setVipChannelEnabled(false);

        try

             

//            // 订阅PushTopicTagpush的消息,都订阅消息

            consumer.subscribe("TopicTest2","*"); 

//             

//            // 程序第一次启动从消息队列头获取数据 

            consumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_FIRST_OFFSET);

//            //可以修改每次消费消息的数量,默认设置是每次消费一条 

//           consumer.setConsumeMessageBatchMaxSize(10);

            //注册消费的监听 

            consumer.registerMessageListener(

                 new MessageListenerConcurrently(){

 

            @Override

            publicConsumeConcurrentlyStatus consumeMessage(

                   List<MessageExt> arg0,ConsumeConcurrentlyContextarg1) {

                // msgs中只收集同一个topic,同一个tag,并且key相同的message 

                    // 会把不同的消息分别放置到不同的队列中

                    for(MessageExtmsg:arg0){

                    if(msg.getTopic().equals("TopicTest1")){

                        if(msg.getKeys().equals("key1")){

                            System.out.println("key1: "+new String(msg.getBody()));

                        }

                        else if(msg.getKeys().equals("key2")){

                            System.out.println("key2: "+new String(msg.getBody()));

                        }else{

                            System.out.println("未知 msgKey : "+msg.getKeys()+new String(msg.getBody()));

                        }

                    }else if(msg.getTopic().equals("TopicTest2")){

                        if(msg.getKeys().equals("key3")){

                            System.out.println("key3: "+new String(msg.getBody()));

                        }else{

                            System.out.println("未知 msgKey : "+msg.getKeys()+new String(msg.getBody()));

                        }

                    }

                   

                    }    

                    returnConsumeConcurrentlyStatus.CONSUME_SUCCESS;

            } 

            }); 

 

           

            consumer.start();

//            Thread.sleep(5000);

////            5秒后挂载消费端消费 

//            consumer.suspend(); 

             

        } catch(Exception e) { 

            e.printStackTrace(); 

        } 

    } 

}

 

 

pom.xml文件:

<dependency>

         <groupId>org.slf4j</groupId>

         <artifactId>slf4j-api</artifactId>

         <version>1.7.21</version>

      </dependency>

     

      <dependency>

         <groupId>ch.qos.logback</groupId>

         <artifactId>logback-classic</artifactId>

         <version>1.1.3</version>

      </dependency>

 

      <dependency>

         <groupId>ch.qos.logback</groupId>

         <artifactId>logback-core</artifactId>

         <version>1.1.3</version>

      </dependency>

 

      <dependency>

         <groupId>com.alibaba.rocketmq</groupId>

         <artifactId>rocketmq-client</artifactId>

         <version>3.5.9</version>

      </dependency>

 

      <dependency>

         <groupId>junit</groupId>

         <artifactId>junit</artifactId>

         <version>4.11</version>

         <scope>test</scope>

      </dependency>

 

 

 

0 0