Amoeba + Keepalived + MMM + MySQL High-Availability Deployment


I. Overall Database Architecture

1. Architecture goals

- Eliminate single points of failure and ensure high availability
- Read/write splitting for the site
- Load balancing of site read traffic

2. Architecture implementation

a. Use a MySQL dual-master setup at the bottom layer to avoid a MySQL single point of failure

b. Use MMM for automatic failover of read and write requests

c. Use Amoeba for read/write splitting and load balancing

d. Use Keepalived to avoid an Amoeba single point of failure

e. The whole architecture is transparent to the application (see the connection sketch below)
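
Regarding item (e): because Keepalived publishes a single VIP in front of the two Amoeba proxies, the application only ever needs one connection endpoint. A minimal sketch, assuming Amoeba listens on its default proxy port 8066 (that part of amoeba.xml is not shown in this document) and using the Amoeba account configured in Section IV:

# The application connects only to the Keepalived VIP; Amoeba then routes
# writes to the writer VIP and balances reads across the reader pools.
mysql -h 192.168.1.18 -P 8066 -u123456 -p123456 db_test -e "SELECT 1;"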

 

3. Future architecture optimization

When the database load later becomes too heavy (for example, a single instance hits a CPU or I/O bottleneck), split the data vertically by business module (members, products, orders) to separate the load and relieve the pressure on a single instance. Amoeba's database routing allows the architecture to transition smoothly.

Note: do not JOIN across tables that live in different module databases.

II. MySQL Dual-Master Installation and Configuration

1. Host preparation

Hostname | IP address   | Port | Application
MYSQL1   | 192.168.1.12 | 3310 | MASTER1
MYSQL2   | 192.168.1.13 | 3310 | MASTER2

2. Software preparation

- cmake-2.8.8.tar.gz
- bison-2.5.tar.gz
- mysql-5.5.23.tar.gz

3. Software installation

CMake installation
tar zxvf cmake-2.8.8.tar.gz
cd cmake-2.8.8
./bootstrap
gmake
gmake install

Bison installation
tar zxvf bison-2.5.tar.gz
cd bison-2.5
./configure
make
make install

 

MySQL installation
/usr/sbin/groupadd mysql
/usr/sbin/useradd -g mysql mysql

tar xvf mysql-5.5.23.tar.gz
cd mysql-5.5.23/

cmake -DCMAKE_INSTALL_PREFIX=/usr/local/mysql \
-DMYSQL_UNIX_ADDR=/tmp/mysql.sock \
-DDEFAULT_CHARSET=utf8 \
-DDEFAULT_COLLATION=utf8_general_ci \
-DWITH_EXTRA_CHARSETS:STRING=all \
-DWITH_MYISAM_STORAGE_ENGINE=1 \
-DWITH_INNOBASE_STORAGE_ENGINE=1 \
-DWITH_READLINE=1 \
-DENABLED_LOCAL_INFILE=1 \
-DMYSQL_DATADIR=/mysql/data

make
make install

chown -R mysql:mysql /usr/local/mysql
mkdir -p /mysql/data/
mkdir -p /mysql/log/
chown -R mysql:mysql /mysql/

cd support-files/
cp mysql.server /etc/init.d/mysqld

/usr/local/mysql/scripts/mysql_install_db \
--defaults-file=/etc/my.cnf \
--basedir=/usr/local/mysql \
--datadir=/mysql/data \
--user=mysql

chmod +x /etc/init.d/mysqld
chkconfig --add mysqld
chkconfig --level 345 mysqld on

service mysqld start
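
A quick check (a sketch; immediately after mysql_install_db the root account has no password yet) that the instance is up and listening on the configured socket and port:

/usr/local/mysql/bin/mysql -S /tmp/mysql.sock -P 3310 -uroot -e "SELECT VERSION();"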

 

my.cnf configuration (/etc/my.cnf)

[client]

#password       = your_password

port            = 3310

socket          = /tmp/mysql.sock

 

[mysqld]

port            = 3310

socket          = /tmp/mysql.sock

skip-external-locking

skip-name-resolve

key_buffer_size = 100M

max_allowed_packet = 1M

sort_buffer_size = 1M

read_buffer_size = 1M

read_rnd_buffer_size = 4M

myisam_sort_buffer_size = 64M

query_cache_size= 16M

thread_concurrency = 8

log-error=/mysql/log/ldmall.err

slow-query-log = 1
slow-query-log-file = /mysql/log/slow.log

long_query_time = 30

log = /mysql/log/ldmall.log

performance_schema=on

 

binlog_format=row

max_binlog_cache_size=50M

max_binlog_size=500M

max_connect_errors=50000

max_connections=2100

max_user_connections=2000

open_files_limit=131070

sync_binlog=0

table_definition_cache=2048

table_open_cache=2048

thread_cache_size=256

innodb_log_group_home_dir=/mysql/data

innodb_data_home_dir=/mysql/data

innodb_adaptive_flushing

innodb_adaptive_hash_index=on

innodb_buffer_pool_instances=1

innodb_buffer_pool_size=100M

innodb_file_per_table

 

innodb_flush_log_at_trx_commit=2

innodb_flush_method=O_DIRECT

innodb_io_capacity=1000

innodb_lock_wait_timeout=100

innodb_log_buffer_size=20M

innodb_log_file_size=100M

innodb_log_files_in_group=4

innodb_max_dirty_pages_pct=60

innodb_open_files=60000

innodb_read_io_threads=8

innodb_stats_on_metadata=on

innodb_thread_concurrency=16

innodb_write_io_threads=8

 

log-bin=mysql-bin

relay-log=b2cmysql1-relay-bin

server-id       = 1

binlog-do-db = db_test

replicate-do-db = db_test

binlog-ignore-db = mysql

binlog-ignore-db = information_schema

binlog-ignore-db = performance_schema

 

[mysqldump]

quick

max_allowed_packet = 16M

 

[mysql]

no-auto-rehash

 

[myisamchk]

key_buffer_size = 128M

sort_buffer_size = 128M

read_buffer = 2M

write_buffer = 2M

 

[mysqlhotcopy]

interactive-timeout

Note: server-id must be different on the two nodes.
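
To confirm that the two nodes really ended up with different server IDs, a quick sketch using the local socket:

# Run on each node; the two values must differ
/usr/local/mysql/bin/mysql -S /tmp/mysql.sock -P 3310 -uroot -e "SHOW VARIABLES LIKE 'server_id';"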

 

Dual-master configuration

# Run on both MYSQL1 and MYSQL2

grant super,replication client,process on *.* to 'rep_agent'@'192.168.1.%' identified by '1qazse4';

grant replication client on *.* to 'rep_monitor'@'192.168.1.%' identified by '1qazse4';

grant replication slave on *.* to  'replication'@'192.168.1.%' identified by '1qazse4';

flush privileges;

# Run on MYSQL1
change master to
  master_host='192.168.1.13',
  master_port=3310,
  master_user='replication',
  master_password='1qazse4',
  master_log_file='mysql-bin.000003',
  master_log_pos=335;

# Run on MYSQL2
change master to
  master_host='192.168.1.12',
  master_port=3310,
  master_user='replication',
  master_password='1qazse4',
  master_log_file='mysql-bin.000011',
  master_log_pos=107;
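
The master_log_file and master_log_pos values above come from the peer master's current binary log position; a sketch of how they are obtained before running CHANGE MASTER TO:

# On the peer node (e.g. on MYSQL2 when configuring MYSQL1):
/usr/local/mysql/bin/mysql -S /tmp/mysql.sock -P 3310 -uroot -e "SHOW MASTER STATUS\G"
# Use the reported File and Position values for master_log_file / master_log_pos.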

# Run on both MYSQL1 and MYSQL2
start slave;
show slave status\G
show processlist\G
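
Before moving on, both replication threads must report Yes on each node; a quick sketch for checking this from the shell:

# Both lines must show Yes on MYSQL1 and on MYSQL2
/usr/local/mysql/bin/mysql -S /tmp/mysql.sock -P 3310 -uroot -e "show slave status\G" | grep -E "Slave_IO_Running|Slave_SQL_Running"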

 

III. MMM Installation and Configuration

1. Host and software preparation

Hostname | IP address   | Application
MON1     | 192.168.1.10 | MMM

Software:
- Algorithm-Diff-1.1902.tar.gz
- Proc-Daemon-0.14.tar.gz
- Test-Simple-0.98.tar.gz
- Time-HiRes-1.9725.tar.gz
- DBI-1.622.tar.gz
- DBD-mysql-4.022.tar.gz
- mmm-1.0-pre2.tar.bz2

Virtual IPs:
- 192.168.1.15
- 192.168.1.16
- 192.168.1.17

2. Software installation

First install the MySQL software following the steps in Section II; in addition, the packages below must also be installed on the two database nodes.

Algorithm-Diff installation
tar xvzf Algorithm-Diff-1.1902.tar.gz
cd Algorithm-Diff-1.1902
perl Makefile.PL
make
make install

Note: the remaining Perl modules are installed the same way. After installing DBI and before installing DBD-mysql, run:
echo "/usr/local/mysql/lib/" >> /etc/ld.so.conf
ldconfig
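
A quick sanity check (a sketch) that the Perl modules can be loaded and see the MySQL client library before installing MMM:

# Should print the module versions without errors
perl -MDBI -e 'print "DBI $DBI::VERSION\n";'
perl -MDBD::mysql -e 'print "DBD::mysql $DBD::mysql::VERSION\n";'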

MMM installation
tar xjvf mmm-1.0-pre2.tar.bz2
cd mmm-1.0-pre2
./install.pl

 

3. MMM configuration

# Configure /usr/local/mmm/etc/mmm_agent.conf on MYSQL1

#

# Master-Master Manager config (agent)

#

 

# Debug mode

debug no

 

# Paths

pid_path /usr/local/mmm/var/mmmd_agent.pid

bin_path /usr/local/mmm/bin

 

# Logging setup

log mydebug

file /usr/local/mmm/var/mmm-debug.log

level debug

 

log mytraps

file /usr/local/mmm/var/mmm-traps.log

level trap

 

# MMMD command socket tcp-port and ip

bind_port 9989

 

# Cluster interface

cluster_interface eth0

 

# Define current server id

this MYSQL1

mode master

 

# For masters

peer MYSQL2

 

# Cluster hosts addresses and access params

host MYSQL1

ip 192.168.1.12

port 3310

user rep_agent

password 1qazse4

 

host MYSQL2

ip 192.168.1.13

port 3310

user rep_agent

password 1qazse4

# Configure /usr/local/mmm/etc/mmm_agent.conf on MYSQL2

#

# Master-Master Manager config (agent)

#

 

# Debug mode

debug no

 

# Paths

pid_path /usr/local/mmm/var/mmmd_agent.pid

bin_path /usr/local/mmm/bin

 

# Logging setup

log mydebug

file /usr/local/mmm/var/mmm-debug.log

level debug

 

log mytraps

file /usr/local/mmm/var/mmm-traps.log

level trap

 

# MMMD command socket tcp-port and ip

bind_port 9989

 

# Cluster interface

cluster_interface eth0

 

# Define current server id

this MYSQL2

mode master

 

# For masters

peer MYSQL1

 

# Cluster hosts addresses and access params

host MYSQL1
ip 192.168.1.12
port 3310
user rep_agent
password 1qazse4

host MYSQL2
ip 192.168.1.13
port 3310
user rep_agent
password 1qazse4

# Configure /usr/local/mmm/etc/mmm_mon.conf on MON1
# Note: make sure none of the VIPs collides with the network's broadcast address

#

# Master-Master Manager config (monitor)

#

 

# Debug mode

debug no

 

# Paths

pid_path /usr/local/mmm/var/mmmd.pid

status_path /usr/local/mmm/var/mmmd.status

bin_path /usr/local/mmm/bin

 

# Logging setup

log mydebug

    file /usr/local/mmm/var/mmm-debug.log

    level debug

 

log mytraps

    file /usr/local/mmm/var/mmm-traps.log

    level trap

 

# MMMD command socket tcp-port

bind_port 9988

agent_port 9989

monitor_ip 127.0.0.1

 

# Cluster interface

cluster_interface eth0

 

# Cluster hosts addresses and access params

host MYSQL1
    ip 192.168.1.12
    port 3310
    user rep_monitor
    password 1qazse4
    mode master
    peer MYSQL2

host MYSQL2
    ip 192.168.1.13
    port 3310
    user rep_monitor
    password 1qazse4
    mode master
    peer MYSQL1

 

#

# Define roles

#

 

active_master_role writer

 

# Mysql Reader role

role reader

    mode balanced

    servers MYSQL1, MYSQL2

    ip 192.168.1.15, 192.168.1.16

 

# Mysql Writer role

role writer

    mode exclusive

    servers MYSQL1, MYSQL2

    ip 192.168.1.17

#

# Checks parameters

#

 

# Ping checker

check ping

    check_period 5

    trap_period 20

    timeout 5

 

# Mysql checker

 

 check mysql

    check_period 5

    trap_period  20

    timeout 5

 

# Mysql replication backlog checker

check rep_backlog

    check_period 10

    trap_period 20

    max_backlog 60

    timeout 5

 

# Mysql replication threads checker

check rep_threads

    check_period 5

    trap_period 20

  timeout 5

 

  

# Start the agent on MYSQL1 and MYSQL2
mmmd_agent
netstat -tlnp

# Start the monitor on MON1
mmmd_mon
mmm_control set_online MYSQL1
mmm_control set_online MYSQL2
mmm_control show
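
Once both hosts are online, the reader and writer VIPs should be bound on the database nodes; a sketch for verifying this:

# On MYSQL1/MYSQL2: the reader VIPs (192.168.1.15/16) and the writer VIP (192.168.1.17)
# should appear on eth0, split between the two nodes.
ip addr show eth0 | grep "192.168.1.1[567]"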

IV. Amoeba Installation and Configuration

1. Host and software preparation

Hostname | IP address   | Application
MON1     | 192.168.1.10 | AMOEBA
MON2     | 192.168.1.14 | AMOEBA

Software:
- jdk-7u25-linux-x64.rpm
- amoeba-mysql-binary-2.1.0-RC5.tar.gz

2. Software installation

# JDK installation
rpm -ivh jdk-7u25-linux-x64.rpm
ln -s /usr/java/jdk1.7.0_25 /usr/java/jdk1.7

# Add to /etc/profile
export JAVA_HOME=/usr/java/jdk1.7
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH

# Amoeba installation
mkdir /usr/local/amoeba/
tar -zxvf amoeba-mysql-binary-2.1.0-RC5.tar.gz -C /usr/local/amoeba/

 

3. Amoeba configuration

# Configure read/write splitting and load balancing; modify the following parts of amoeba.xml

<property name="authenticator">

        <bean class="com.meidusa.amoeba.mysql.server.MysqlClientAuthenticator">

 

                <property name="user">123456</property>

 

                <property name="password">123456</property>

 

                <property name="filter">

                        <bean class="com.meidusa.amoeba.server.IPAccessController">

<property name="ipFile">${amoeba.home}/conf/access_list.conf</property>

                        </bean>

                </property>

        </bean>

</property>

<runtime class="com.meidusa.amoeba.mysql.context.MysqlRuntimeContext">

        <!-- proxy server net IO Read thread size -->

        <property name="readThreadPoolSize">200</property>

 

        <!-- proxy server client process thread size -->

        <property name="clientSideThreadPoolSize">300</property>

 

        <!-- mysql server data packet process thread size -->

        <property name="serverSideThreadPoolSize">300</property>

 

        <!-- per connection cache prepared statement size  -->

        <property name="statementCacheSize">500</property>

 

        <!-- query timeout( default: 60 second , TimeUnit:second) -->

        <property name="queryTimeout">60</property>

</runtime>

<queryRouter class="com.meidusa.amoeba.mysql.parser.MysqlQueryRouter">

        <property name="ruleLoader">

                <bean class="com.meidusa.amoeba.route.TableRuleFileLoader">

                        <property name="ruleFile">${amoeba.home}/conf/rule.xml</property>

                        <property name="functionFile">${amoeba.home}/conf/ruleFunctionMap.xml</property>

                </bean>

        </property>

        <property name="sqlFunctionFile">${amoeba.home}/conf/functionMap.xml</property>

        <property name="LRUMapSize">1500</property>

        <property name="defaultPool">server3</property>

 

        <property name="writePool">server3</property>
        <!-- Only one readPool can be specified; reads go to the virtual pool
             "multiPool" (defined in dbServers.xml), which round-robins across
             server1, server2 and server3 -->
        <property name="readPool">multiPool</property>
        <property name="needParse">true</property>

</queryRouter>

                              

# Modify the following parts of dbServers.xml

<factoryConfig class="com.meidusa.amoeba.mysql.net.MysqlServerConnectionFactory">

        <property name="manager">${defaultManager}</property>

        <property name="sendBufferSize">64</property>

        <property name="receiveBufferSize">128</property>

 

        <!-- mysql port -->

        <property name="port">3310</property>

 

        <!-- mysql schema -->

        <property name="schema">db_test</property>

 

        <!-- mysql user -->

        <property name="user">123456</property>

 

        <!--  mysql password -->

        <property name="password">123456</property>

 

</factoryConfig>

<dbServer name="server1"  parent="abstractServer">

        <factoryConfig>

                <!-- mysql ip -->

                <property name="ipAddress">192.168.1.15</property>

        </factoryConfig>

</dbServer>

<dbServer name="server2"  parent="abstractServer">

        <factoryConfig>

                <!-- mysql ip -->

                <property name="ipAddress">192.168.1.16</property>

        </factoryConfig>

</dbServer>

<dbServer name="server3"  parent="abstractServer">

        <factoryConfig>

                <!-- mysql ip -->

                <property name="ipAddress">192.168.1.17</property>

        </factoryConfig>

</dbServer>

 

<dbServer name="multiPool" virtual="true">

        <poolConfig class="com.meidusa.amoeba.server.MultipleServerPool">

                <!-- Load balancing strategy: 1=ROUNDROBIN , 2=WEIGHTBASED , 3=HA-->

                <property name="loadbalance">1</property>

 

                <!-- Separated by commas,such as: server1,server2,server1 -->

                <property name="poolNames">server1,server2,server3</property>

        </poolConfig>

</dbServer>

# Tune the JVM startup parameters (in the Amoeba startup script)
DEFAULT_OPTS="-server -Xms256m -Xmx256m -Xss1024k"

# Start Amoeba

/usr/local/amoeba/bin/amoeba start &
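
With Amoeba running on MON1 and MON2, read routing can be spot-checked by connecting through Amoeba and looking at which backend answers. A sketch, assuming Amoeba's proxy listens on its default port 8066 (the server section of amoeba.xml is not shown above):

# Repeating this query should show the server_id rotating across the backend pools
mysql -h 192.168.1.10 -P 8066 -u123456 -p123456 db_test -e "SELECT @@server_id;"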

V. Keepalived Installation and Configuration

1. Host and software preparation

Hostname | IP address   | Application
MON1     | 192.168.1.10 | KEEPALIVED
MON2     | 192.168.1.14 | KEEPALIVED

Software:
- keepalived-1.1.19.tar.gz

Virtual IP: 192.168.1.18

2. Software installation

tar zxvf keepalived-1.1.19.tar.gz
cd keepalived-1.1.19
./configure --prefix=/usr/local/keepalived
make
make install
cp /usr/local/keepalived/sbin/keepalived /usr/sbin/
cp /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/
cp /usr/local/keepalived/etc/rc.d/init.d/keepalived /etc/init.d/
mkdir /etc/keepalived
cd /etc/keepalived/

3. Keepalived configuration

# Configure keepalived.conf (shown for MON1; MON2 differs only in the values marked ## below)

! Configuration File for keepalived

 

global_defs {

   notification_email {

   linhaiqiang@126.com

   }

   notification_email_from localhost@127.0.0.1

   smtp_server 127.0.0.1

   smtp_connect_timeout 30

   router_id amoeba

}

vrrp_script check_amoeba {
     script "/etc/keepalived/check_amoeba.sh"
     interval 2
     weight 2
}

 

vrrp_instance VI_1 {

    state MASTER ##BACKUP

    interface eth0

    virtual_router_id 51

    priority 100 ##98

    advert_int 1

    authentication {

        auth_type PASS

        auth_pass 1111

    }

 

track_script {
     check_amoeba
}

        

    virtual_ipaddress {

         192.168.1.18

    }

}
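
MON2 uses the same keepalived.conf except for the two values flagged with ## above; on the backup node they become:

    state BACKUP
    priority 98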

 

# Write the check_amoeba.sh script
#!/bin/bash
# Restart Amoeba if no java process is found; if it still will not start,
# stop keepalived so that the VIP fails over to the other node.
A=`ps -C java --no-heading | wc -l`
if [ $A -eq 0 ]; then
    /usr/local/amoeba/bin/amoeba start &
    sleep 3
    if [ `ps -C java --no-heading | wc -l` -eq 0 ]; then
        /etc/init.d/keepalived stop
    fi
fi

# Start keepalived

/etc/init.d/keepalived start
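
After keepalived starts on both MON hosts, the service VIP should sit on MON1 (the higher-priority node); a quick sketch to confirm this and to watch it move during failover tests:

# On MON1: 192.168.1.18 should be listed on eth0
ip addr show eth0 | grep 192.168.1.18
# Stop Amoeba on MON1; check_amoeba.sh stops keepalived if the restart fails,
# after which the same command on MON2 should show the VIP.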

VI. Failover and Load Balancing Tests

Test case list

Case ID | Operation | Expected result | Actual result
UC001 | Stop either MySQL master service | Requests automatically switch to the other master | Pass
UC002 | Power off either database server | Requests automatically switch to the other database server | Pass
UC003 | Stop either Amoeba service | Requests automatically switch to the other Amoeba | Pass
UC004 | Power off either Amoeba host | Requests automatically switch to the other Amoeba host | Pass
UC005 | With the load balancing rules configured, send 9 consecutive read requests | 6 requests go to MASTER1 and 3 to MASTER2 | Pass
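
UC005 can be reproduced from the shell; a sketch assuming Amoeba's default proxy port 8066 behind the Keepalived VIP. Tallying the returned server_id values shows the 6/3 split, because the round-robin pool contains the two reader VIPs plus the writer VIP, and the writer VIP lives on one of the two masters:

# Send 9 read requests through the VIP and count which backend answered each one
for i in $(seq 1 9); do
    mysql -h 192.168.1.18 -P 8066 -u123456 -p123456 db_test -N -e "SELECT @@server_id;"
done | sort | uniq -c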

 
