lvs piranha nginx tomcat (DR) 配置

来源:互联网 发布:雇佣网络水军 编辑:程序博客网 时间:2024/06/08 07:01

实验平台 : CentOS release5.2 (Final)

实验目标 : 快速掌握和理解Piranha方案。

 案例一:

ip:
10.10.42.23   lvs-realserver VIP
10.10.42.201  lvs-master
10.10.42.202  lvs-slave

10.10.42.203  realserver1
10.10.42.205  realserver2

10.10.42.201部署信息:
[root@sc1 lvs-mango]# vi /etc/hosts
10.10.42.201 lvs-master
10.10.42.202 lvs-slave

[root@sc1 lvs-mango]# wget http://mirrors.sohu.com/fedora-epel/6Server/x86_64/epel-release-6-8.noarch.rpm
[root@sc1 lvs-mango]# wget ftp://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/ldirectord-3.9.6-0rc1.1.1.x86_64.rpm
[root@sc1 lvs-mango]# rpm -ivh epel-release-6-8.noarch.rpm
warning: epel-release-6-8.noarch.rpm: Header V3 RSA/SHA256 Signature, key ID 0608b895: NOKEY
Preparing...                ########################################### [100%]
   1:epel-release           ########################################### [100%]

[root@sc1 lvs-mango]# yum install perl  perl-MailTools perl-Net-SSLeay perl-IO-Socket-INET6.noarch perl-Net-INET6Glue.noarch perl-Socket6.x86_64
[root@sc1 lvs-mango]# rpm -ivh ldirectord-3.9.6-0rc1.1.1.x86_64.rpm
warning: ldirectord-3.9.6-0rc1.1.1.x86_64.rpm: Header V3 RSA/SHA1 Signature, key ID 17280ddf: NOKEY
Preparing...                ########################################### [100%]
   1:ldirectord             ########################################### [100%]

[root@sc1 lvs-mango]# vi /etc/yum.repos.d/epel.repo
[epel]
name=Extra Packages for Enterprise Linux 6 - $basearch
baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6

[epel-debuginfo]
name=Extra Packages for Enterprise Linux 6 - $basearch - Debug
baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1

[epel-source]
name=Extra Packages for Enterprise Linux 6 - $basearch - Source
baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1

[root@sc1 lvs-mango]# yum install ipvsadm
[root@sc1 lvs-mango]# yum -y install heartbeat.x86_64 heartbeat-devel.x86_64 heartbeat-libs.x86_64

[root@slave1 ~]# vi /etc/sysconfig/ha/lvs.cf
serial_no = 110
primary = 10.10.42.201
service = lvs
backup_active = 1
backup = 10.10.42.202
heartbeat = 1
heartbeat_port = 539
keepalive = 8
deadtime = 9
network = direct
debug_level = NONE
monitor_links = 0
syncdaemon = 0
tcp_timeout = 5
tcpfin_timeout = 6
udp_timeout = 7
virtual nginx_ceshi {
     active = 1
     address = 10.10.42.23 eth0:2
     vip_nmask = 255.255.255.0
     port = 80
     send = "GET / HTTP/1.0\r\n\r\n"
     expect = "HTTP"
     use_regex = 0
     load_monitor = none
     scheduler = rr
     protocol = tcp
     timeout = 6
     reentry = 15
     quiesce_server = 0
     server nginx_ceshi_s01 {
         address = 10.10.42.203
         active = 1
         port = 80
         weight = 1
     }
     server nginx_ceshi_s02 {
         address = 10.10.42.205
         active = 1
         port = 80
         weight = 1
     }
}

服务启动:
[root@slave1 ~]# /etc/init.d/piranha-gui start
[root@slave1 ~]# /etc/init.d/pulse start

验证启动状态:
[root@slave1 ~]# ifconfig
eth0:2    Link encap:Ethernet  HWaddr 00:50:56:8A:69:4B 
          inet addr:10.10.42.23  Bcast:10.10.42.255  Mask:255.255.255.0
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1


10.10.42.202部署信息:
[root@sc2 lvs-mango]# cat /etc/hosts
10.10.42.201 lvs-master
10.10.42.202 lvs-slave

[root@sc2 lvs-mango]# wget http://mirrors.sohu.com/fedora-epel/6Server/x86_64/epel-release-6-8.noarch.rpm
[root@sc2 lvs-mango]# wget ftp://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/ldirectord-3.9.6-0rc1.1.1.x86_64.rpm
[root@sc2 lvs-mango]# rpm -ivh epel-release-6-8.noarch.rpm
warning: epel-release-6-8.noarch.rpm: Header V3 RSA/SHA256 Signature, key ID 0608b895: NOKEY
Preparing...                ########################################### [100%]
   1:epel-release           ########################################### [100%]

[root@sc2 lvs-mango]# yum install perl  perl-MailTools perl-Net-SSLeay perl-IO-Socket-INET6.noarch perl-Net-INET6Glue.noarch perl-Socket6.x86_64
[root@sc2 lvs-mango]# rpm -ivh ldirectord-3.9.6-0rc1.1.1.x86_64.rpm
warning: ldirectord-3.9.6-0rc1.1.1.x86_64.rpm: Header V3 RSA/SHA1 Signature, key ID 17280ddf: NOKEY
Preparing...                ########################################### [100%]
   1:ldirectord             ########################################### [100%]

[root@sc2 lvs-mango]# vi /etc/yum.repos.d/epel.repo
[epel]
name=Extra Packages for Enterprise Linux 6 - $basearch
baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6

[epel-debuginfo]
name=Extra Packages for Enterprise Linux 6 - $basearch - Debug
baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1

[epel-source]
name=Extra Packages for Enterprise Linux 6 - $basearch - Source
baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1

[root@sc2 lvs-mango]# yum install ipvsadm
[root@sc2 lvs-mango]# yum -y install heartbeat.x86_64 heartbeat-devel.x86_64 heartbeat-libs.x86_64

[root@slave1 ~]# vi /etc/sysconfig/ha/lvs.cf
serial_no = 110
primary = 10.10.42.201
service = lvs
backup_active = 1
backup = 10.10.42.202
heartbeat = 1
heartbeat_port = 539
keepalive = 8
deadtime = 9
network = direct
debug_level = NONE
monitor_links = 0
syncdaemon = 0
tcp_timeout = 5
tcpfin_timeout = 6
udp_timeout = 7
virtual nginx_ceshi {
     active = 1
     address = 10.10.42.23 eth0:2
     vip_nmask = 255.255.255.0
     port = 80
     send = "GET / HTTP/1.0\r\n\r\n"
     expect = "HTTP"
     use_regex = 0
     load_monitor = none
     scheduler = rr
     protocol = tcp
     timeout = 6
     reentry = 15
     quiesce_server = 0
     server nginx_ceshi_s01 {
         address = 10.10.42.203
         active = 1
         port = 80
         weight = 1
     }
     server nginx_ceshi_s02 {
         address = 10.10.42.205
         active = 1
         port = 80
         weight = 1
     }
}

服务启动:
[root@slave1 ~]# /etc/init.d/piranha-gui start
[root@slave1 ~]# /etc/init.d/pulse start

验证启动状态:
[root@slave1 ~]# ifconfig

测试集群状态:
[root@slave1 ~]#ipvsadm -l
修改DNS指向10.10.42.23后访问域名,查看后端nginx日志看是否有请求分发过来。

主从切换验证:
停止主,通过日志看后台执行动作
[root@slave1 ~]# tail -100f /var/log/messages
修改DNS指向10.10.42.23后访问域名,查看后端nginx日志看是否有请求分发过来。

 

后端nginx配置文件:
[root@slave3 ~]# vi /usr/local/nginx/conf/nginx.conf
# Front-end reverse proxy for the real servers: one virtual host per domain,
# each proxying to its own upstream pool.
worker_processes  1;
# debug-level error log — very verbose; presumably for this test setup only.
error_log  logs/error.log  debug;
events {
    # max simultaneous connections per worker process
    worker_connections  60000;
}

http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile        on;
    keepalive_timeout  65;
    gzip  on;

    # Upstream pools. Each pool name matches the host written in the
    # corresponding proxy_pass below, so proxy_pass resolves to the pool
    # rather than doing a DNS lookup for the domain.
    upstream www.test.com{
         server 10.10.130.41:80;
         server 10.10.130.42:80;
         }

    upstream ro.test.com{
         server 10.10.5.155:80;
         server 10.10.5.156:80;
         }

    upstream flight.test.com{
         server 10.10.130.45:80;
         server 10.10.130.46:80;
         }

    upstream hotel.test.com{
         server 10.10.130.43:80;
         server 10.10.130.44:80;
         }

    upstream supply.test.com{
         server 10.10.130.102:80;
         server 10.10.130.103:80;
         }


    # Standard "combined"-style access log format.
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  logs/access.log  main;
    # NOTE(review): "access_log on;" is not an on/off switch — the first
    # argument of access_log is a log path, so nginx would write to a file
    # literally named "on". Probably meant to be removed; verify.
    access_log on;

    # One server block per domain; each logs to its own file and forwards
    # to the matching upstream pool, passing the server name upstream.
    server {
        listen       80;
        server_name  www.test.com;
        access_log logs/www.test.com.log main;
        location / {
             proxy_set_header   ServerName          $server_name;
             proxy_pass http://www.test.com;
        }
    }

    server {
        listen       80;
        server_name  ro.test.com;
        access_log logs/ro.test.com.log main;
        location / {
             proxy_set_header   ServerName          $server_name;
             proxy_pass http://ro.test.com;
        }
    }

    server {
        listen       80;
        server_name  hotel.test.com;
        access_log logs/hotel.test.com.log main;
        location / {
             proxy_set_header   ServerName          $server_name;
             proxy_pass http://hotel.test.com;
        }
    }

    server {
        listen       80;
        server_name  flight.test.com;
        access_log logs/flight.test.com.log main;
        location / {
             proxy_set_header   ServerName          $server_name;
             proxy_pass http://flight.test.com;
        }
    }

    server {
        listen       80;
        server_name  supply.test.com;
        access_log logs/supply.test.com.log main;
        location / {
             proxy_set_header   ServerName          $server_name;
             proxy_pass http://supply.test.com;
        }
    }
}

 

 

 

 

 

[root@slave3 ~]# cat /usr/local/realserver.sh
#!/bin/bash
# RealServer setup script for LVS Direct Routing (DR) mode.
# Usage: realserver.sh {start|stop|restart|status}
#
# start : binds the VIP to lo:0 (host route, /32 mask) and sets the ARP
#         sysctls so this host accepts traffic for the VIP without
#         answering ARP requests for it (required for LVS-DR).
# stop  : reverts the ARP sysctls and removes the VIP from lo:0.
MpmWeb_VIP=10.10.42.23

start(){
    # /32 netmask + broadcast=VIP keeps the VIP from being advertised on the LAN.
    ifconfig lo:0 "$MpmWeb_VIP" netmask 255.255.255.255 broadcast "$MpmWeb_VIP"
    /sbin/route add -host "$MpmWeb_VIP" dev lo:0
    # arp_ignore=1: reply to ARP only if the target IP is configured on the
    # receiving interface. arp_announce=2: use the best local address as the
    # ARP source. Together these stop the real server from claiming the VIP.
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
    # NOTE(review): sysctl -p reloads /etc/sysctl.conf and may override the
    # values written above if they are also declared there — confirm intent.
    sysctl -p >/dev/null 2>&1
    echo "RealServer Start OK [lvs_dr]"
}

stop(){
    # Restore default ARP behaviour, then drop the VIP alias and host route.
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
    /sbin/ifconfig lo:0 down
    /sbin/route del -host "$MpmWeb_VIP"
    sysctl -p >/dev/null 2>&1
    # fix: message typo "Stoped" -> "Stopped"
    echo "RealServer Stopped [lvs_dr]"
}

restart(){
    stop
    start
}

# Quote "$1" so a missing argument falls through to the usage branch cleanly.
case "$1" in
start)
     start
      ;;
stop)
     stop
      ;;
restart)
     restart
      ;;
status)
     /sbin/ifconfig
      ;;
*)
   echo "Usage: $0 {start|stop|restart|status}"
   exit 1
esac

 

 

 

 

 

 

 

案例二:

.Piranha方案基本简绍.

1.Piranha方案优点:

1.1.1配置简洁高效:配置简便,一个lvs.cf配置文件即可搞定(类似keepalived方案)。

1.1.2WEB配置界面:WEB配置对于那些不懂LVS配置的人员来说非常吸引力.

1.1.3完整的功能:

主备LVS (Load Balancer)间的Heartbeat HA (pulse, send_arp)

Load Balancer与Real Server间进程服务的Heartbeat (nanny)

IPVS功能 (lvsd)

IPVS的管理 (ipvsadm)

 

2.Piranha方案原理结构描述:

Piranha方案是基于LVS基础上设计的一套负载均衡高可用解决方案.

LVS运行在一对有相似配置的计算机上:

一个作为活动LVS Router(Active LVS Router)

一个作为备份LVS Router(Backup LVS Router)

 

活动LVS Router服务有两个角色:

* 均衡负载到真实服务器上。

* 检查真实服务器提供的服务是否正常。

备份LVS Router用来监控活动的LVS Router,以备活动的LVS Router失败时由备份LVS Router接管。

 

Pulse:Pulse进程运行在活动LVS Router和备份LVS Router上。

在备份LVS Router上,pulse发送一个心跳(heartbeat)到活动LVS Router的公网接口上以检查活动LVS Router是否正常。

在活动LVS Router上,pulse启动lvs进程并响应来自于备份LVS Router的心跳。

 

lvsd:lvs进程调用ipvsadm工具去配置和维护IPVS路由表,并为每一个在真实服务器上的虚拟服务启动一个nanny进程。

 

nanny:每一个nanny进程去检查真实服务器上的虚拟服务状态,并将故障情况通知lvs进程。假如一个故障被发现,lvs进程通知ipvsadm在IPVS路由表中将此节点删除。

 

send_arp:如果备份LVS Router未收到来自于活动LVS Router的响应,

它将调用send_arp将虚拟IP地址再分配到备份LVS Router的公网接口上。

并在公网接口和局域网接口上分别发送一个命令去关掉活动LVS Router上的lvs进程。同时启动自己的lvs进程来调度客户端请求。

 

3.Piranha方案基本套件安装:

ip:
10.10.42.23   lvs-realserver VIP
10.10.42.201  lvs-master
10.10.42.202  lvs-slave

10.10.42.203  realserver1
10.10.42.205  realserver2

10.10.42.201部署信息:
[root@sc1 lvs-mango]# vi /etc/hosts
10.10.42.201 lvs-master
10.10.42.202 lvs-slave

[root@sc1 lvs-mango]# wget http://mirrors.sohu.com/fedora-epel/6Server/x86_64/epel-release-6-8.noarch.rpm
[root@sc1 lvs-mango]# wget ftp://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/ldirectord-3.9.6-0rc1.1.1.x86_64.rpm
[root@sc1 lvs-mango]# rpm -ivh epel-release-6-8.noarch.rpm
warning: epel-release-6-8.noarch.rpm: Header V3 RSA/SHA256 Signature, key ID 0608b895: NOKEY
Preparing...                ########################################### [100%]
   1:epel-release           ########################################### [100%]

[root@sc1 lvs-mango]# rpm -ivh ldirectord-3.9.6-0rc1.1.1.x86_64.rpm
warning: ldirectord-3.9.6-0rc1.1.1.x86_64.rpm: Header V3 RSA/SHA1 Signature, key ID 17280ddf: NOKEY
Preparing...                ########################################### [100%]
   1:ldirectord             ########################################### [100%]

[root@sc1 lvs-mango]# vi /etc/yum.repos.d/epel.repo
[epel]
name=Extra Packages for Enterprise Linux 6 - $basearch
baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6

[epel-debuginfo]
name=Extra Packages for Enterprise Linux 6 - $basearch - Debug
baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1

[epel-source]
name=Extra Packages for Enterprise Linux 6 - $basearch - Source
baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
gpgcheck=1

 

[root@sc1 lvs-mango]#yum makecache ; yum install perl  perl-MailTools perl-Net-SSLeay perl-IO-Socket-INET6.noarch perl-Net-INET6Glue.noarch perl-Socket6.x86_64
[root@sc1 lvs-mango]# yum install ipvsadm
[root@sc1 lvs-mango]# yum -y install heartbeat.x86_64 heartbeat-devel.x86_64 heartbeat-libs.x86_64

4.配置文件介绍:

/etc/sysconfig/ha/lvs.cf     //http://ip:3636 web界面配置的配置文件写入此文件.

[root@slave1 ~]# vi /etc/sysconfig/ha/lvs.cf
serial_no = 110
primary = 10.10.42.201
service = lvs
backup_active = 1
backup = 10.10.42.202
heartbeat = 1
heartbeat_port = 539
keepalive = 8
deadtime = 9
network = direct
debug_level = NONE
monitor_links = 0
syncdaemon = 0
tcp_timeout = 5
tcpfin_timeout = 6
udp_timeout = 7
virtual nginx_ceshi {
     active = 1
     address = 10.10.42.23 eth0:2
     vip_nmask = 255.255.255.0
     port = 80
     send = "GET / HTTP/1.0\r\n\r\n"
     expect = "HTTP"
     use_regex = 0
     load_monitor = none
     scheduler = rr
     protocol = tcp
     timeout = 6
     reentry = 15
     quiesce_server = 0
     server nginx_ceshi_s01 {
         address = 10.10.42.203
         active = 1
         port = 80
         weight = 1
     }
     server nginx_ceshi_s02 {
         address = 10.10.42.205
         active = 1
         port = 80
         weight = 1
     }
}

/etc/init.d/piranha-gui start  //启动piranha服务的WEB配置界面.

/etc/init.d/pulse           //启动piranha服务读取的就是/etc/sysconfig/ha/lvs.cf.

 

.    Piranha配置

配置主LVS服务器.

# vi /etc/sysctl.conf找到下面行 //启用数据转发.

net.ipv4.ip_forward = 0,将其中的0改成1,即:net.ipv4.ip_forward = 1

执行如下命令来应用:sysctl -p

 

通过WEB界面配置Piranha服务.

# /etc/init.d/piranha-gui start  //启动Piranha服务.

#/usr/sbin/piranha-passwd  //设置密码,请设置你的piranha服务WEB配置登陆密码.

http://10.10.42.201:3636/  输入用户名: piranha 及刚才设置的密码登陆.

A)CONTROL / MONITORING

 

B) GLOBAL SETTINGS

Primary server public IP:主服务器用于与应用服务器(Real Server)连接的IP

Primary server private IP:主服务器用于与备用服务器连接的心跳IP

Use Network Type:所选用的LVS模式。

 

C)REDUNDANCY配置 

Redundant server public ip:备用服务器的公网IP

Redundant server public IP:备用服务器用于与应用服务器(Real Server)连接的IP

Redundant server private IP:备用服务器用于与主服务器连接的心跳IP

Heartbeat interval:备用服务器对主服务器进行心跳检测的轮询时间。

Assume dead after:如果主服务器在指定时间内没有恢复心跳,则宣告服务器失效并进行接管。

Heartbeat runs on port:使用心跳来检测所使用的端口。

Monitor NIC links for failures:是否检测网卡的连接状态。

  

D)VIRTAL SERVERS配置

Name:定义虚拟服务器的名称。

Application port:指定此目标应用服务的端口。

Protocol:目标应用服务的网络协议,TCP或UDP。

Virtual IP Address:定义目标应用所使用的虚拟IP

Virtual IP Network Mask:定义目标应用所使用的虚拟IP的子网掩码。

Firewall Mark:当目标应用要使用多个IP端口时,结合IPTABLE设定防火墙标记。

Device:虚拟IP所挂接的网卡设备名。

Re-entry Time:当发现一个Real Server故障后,LVS Route对这个Server的检测间隔时间。

Server timeout:LVS Route向Real Server发送指令后,若超过此时间没有响应则认为服务器发生故障。

Quiesce server:一旦有Real Server加入或恢复,则所有负载队列记录均归"0"并重新进行分配。

Load monitoring tool:在Real Server中通过ruptime或rup命令获得系统负载,以结合相应的Scheduling算法进行调度计算。

Scheduling:此虚拟服务器使用的调度算法。

Persistence:同一客户端长连接的保持时间。

Persistence Network Mask:长连接保持的子网掩码(网段)范围。

Load monitoring tool要求Real Server安装有ruptime或rup,并要求LVS服务器可以使用root账号在不需要密码的情况下通过SSH连接到Real Server。

Scheduling中包括以下8种调度策略:

Round-Robin Scheduling:轮询策略,IP分发时逐个地对Real Server进行轮询。 

Weighted Round-Robin Scheduling:加权轮询策略,配合权值进行轮询策略计算。

Least-Connection:最小连接优先策略,将新的IP请求分发到访问队列较短的Real Server

Weighted Least-Connections:加权最小连接优先策略,配合权值进行最小连接优先策略计算。

Locality-Based Least-Connection Scheduling:以下简称LBLCS,根据目标IP地址找出最近使用的服务器,若该服务器可用并且没有超载(系统压力未达到一半),就将请求发送到该服务器,否则使用最小连接优先策略。此策略主要针对的是Cache网关服务器。 

Locality-Based Least Connections with Replication Scheduling:与LBLCS类似,在LBLCS的基础上加入复制调度策略,使得"热门"网站使用时尽量Cache在同一台网关服务器中,进一步避免了在多台服务器中保存相同的Cache信息。此策略主要针对的边是Cache网关服务器。 

Destination Hashing Scheduling:通过对目标地址的Hash计算来确定目标服务器。此策略主要针对的是Cache网关服务器。 

Source Hashing Scheduling:通过对源地址的Hash计算来确定目标服务器。此策略主要针对的是Cache网关服务器。

 

E)添加real server

Name:设置此Real Server的名称。

Address:设置此Real Server的IP地址。

Weight:设置此Real Server的权值,当各Real Server的性能不相同时可设定性能较高的服务器得到较高的权值。

 

E)MONITORING SCRIPTS配置

Sending Program:通过程序实现Real Server中对应用服务可用性的判断(不能与Send同时使用)

Send:直接通过VIRTUAL SERVER中指定的端口发送指令。

Expect:Sending Program或Send后的返回值,如果与此返回值匹配,则表明此应用服务在当前Real Server中运行正常。

Treat expect string as a regular expression:将Expect中的值作为正则表达式与返回值进行比对。 

注意:

此处的功能主要用于判断Real Server中的目标服务运行是否正常,如果发现服务失效,则主动在此VIRTUAL SERVER中隔离该Real Server

 

三、设置LVS相关服务自启动[Virtual Server]

/etc/init.d/piranha-gui start

/etc/init.d/pulse start

成功后的截图如下:

加入开启启动:

chkconfig --level 345 piranha-gui on

chkconfig --level 345 pulse on

 

四、RealServer系统配置[real server]

#!/bin/bash
# RealServer setup script for LVS Direct Routing (DR) mode.
# Usage: realserver.sh {start|stop|restart|status}
#
# start : binds the VIP to lo:0 (host route, /32 mask) and sets the ARP
#         sysctls so this host accepts traffic for the VIP without
#         answering ARP requests for it (required for LVS-DR).
# stop  : reverts the ARP sysctls and removes the VIP from lo:0.
#
# fix: VIP was 10.10.42.22 here, inconsistent with the 10.10.42.23 VIP
# configured in lvs.cf and in every other copy of this script.
MpmWeb_VIP=10.10.42.23

start(){
    # /32 netmask + broadcast=VIP keeps the VIP from being advertised on the LAN.
    ifconfig lo:0 "$MpmWeb_VIP" netmask 255.255.255.255 broadcast "$MpmWeb_VIP"
    /sbin/route add -host "$MpmWeb_VIP" dev lo:0
    # arp_ignore=1 / arp_announce=2: stop the real server from answering or
    # announcing ARP for the VIP (required for LVS-DR).
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
    # NOTE(review): sysctl -p reloads /etc/sysctl.conf and may override the
    # values written above if they are also declared there — confirm intent.
    sysctl -p >/dev/null 2>&1
    echo "RealServer Start OK [lvs_dr]"
}

stop(){
    # Restore default ARP behaviour, then drop the VIP alias and host route.
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
    /sbin/ifconfig lo:0 down
    /sbin/route del -host "$MpmWeb_VIP"
    sysctl -p >/dev/null 2>&1
    # fix: message typo "Stoped" -> "Stopped"
    echo "RealServer Stopped [lvs_dr]"
}

restart(){
    stop
    start
}

# Quote "$1" so a missing argument falls through to the usage branch cleanly.
case "$1" in
start)
     start
      ;;
stop)
     stop
      ;;
restart)
     restart
      ;;
status)
     /sbin/ifconfig
      ;;
*)
   echo "Usage: $0 {start|stop|restart|status}"
   exit 1
esac

 

五、配置realserver前端代理

[root@slave3 ~]# vi /usr/local/nginx/conf/nginx.conf
# Front-end reverse proxy for the real servers: one virtual host per domain,
# each proxying to its own upstream pool.
worker_processes  1;
# debug-level error log — very verbose; presumably for this test setup only.
error_log  logs/error.log  debug;
events {
    # max simultaneous connections per worker process
    worker_connections  60000;
}

http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile        on;
    keepalive_timeout  65;
    gzip  on;

    # Upstream pools. Each pool name matches the host written in the
    # corresponding proxy_pass below, so proxy_pass resolves to the pool
    # rather than doing a DNS lookup for the domain.
    upstream www.test.com{
         server 10.10.130.41:80;
         server 10.10.130.42:80;
         }

    upstream ro.test.com{
         server 10.10.5.155:80;
         server 10.10.5.156:80;
         }

    upstream flight.test.com{
         server 10.10.130.45:80;
         server 10.10.130.46:80;
         }

    upstream hotel.test.com{
         server 10.10.130.43:80;
         server 10.10.130.44:80;
         }

    upstream supply.test.com{
         server 10.10.130.102:80;
         server 10.10.130.103:80;
         }


    # Standard "combined"-style access log format.
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  logs/access.log  main;
    # NOTE(review): "access_log on;" is not an on/off switch — the first
    # argument of access_log is a log path, so nginx would write to a file
    # literally named "on". Probably meant to be removed; verify.
    access_log on;

    # One server block per domain; each logs to its own file and forwards
    # to the matching upstream pool, passing the server name upstream.
    server {
        listen       80;
        server_name  www.test.com;
        access_log logs/www.test.com.log main;
        location / {
             proxy_set_header   ServerName          $server_name;
             proxy_pass http://www.test.com;
        }
    }

    server {
        listen       80;
        server_name  ro.test.com;
        access_log logs/ro.test.com.log main;
        location / {
             proxy_set_header   ServerName          $server_name;
             proxy_pass http://ro.test.com;
        }
    }

    server {
        listen       80;
        server_name  hotel.test.com;
        access_log logs/hotel.test.com.log main;
        location / {
             proxy_set_header   ServerName          $server_name;
             proxy_pass http://hotel.test.com;
        }
    }

    server {
        listen       80;
        server_name  flight.test.com;
        access_log logs/flight.test.com.log main;
        location / {
             proxy_set_header   ServerName          $server_name;
             proxy_pass http://flight.test.com;
        }
    }

    server {
        listen       80;
        server_name  supply.test.com;
        access_log logs/supply.test.com.log main;
        location / {
             proxy_set_header   ServerName          $server_name;
             proxy_pass http://supply.test.com;
        }
    }
}

 

 

[root@slave3 ~]# cat /usr/local/realserver.sh
#!/bin/bash
# RealServer setup script for LVS Direct Routing (DR) mode.
# Usage: realserver.sh {start|stop|restart|status}
#
# start : binds the VIP to lo:0 (host route, /32 mask) and sets the ARP
#         sysctls so this host accepts traffic for the VIP without
#         answering ARP requests for it (required for LVS-DR).
# stop  : reverts the ARP sysctls and removes the VIP from lo:0.
MpmWeb_VIP=10.10.42.23

start(){
    # /32 netmask + broadcast=VIP keeps the VIP from being advertised on the LAN.
    ifconfig lo:0 "$MpmWeb_VIP" netmask 255.255.255.255 broadcast "$MpmWeb_VIP"
    /sbin/route add -host "$MpmWeb_VIP" dev lo:0
    # arp_ignore=1 / arp_announce=2: stop the real server from answering or
    # announcing ARP for the VIP (required for LVS-DR).
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
    # NOTE(review): sysctl -p reloads /etc/sysctl.conf and may override the
    # values written above if they are also declared there — confirm intent.
    sysctl -p >/dev/null 2>&1
    echo "RealServer Start OK [lvs_dr]"
}

stop(){
    # Restore default ARP behaviour, then drop the VIP alias and host route.
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
    /sbin/ifconfig lo:0 down
    /sbin/route del -host "$MpmWeb_VIP"
    sysctl -p >/dev/null 2>&1
    # fix: message typo "Stoped" -> "Stopped"
    echo "RealServer Stopped [lvs_dr]"
}

restart(){
    stop
    start
}

# Quote "$1" so a missing argument falls through to the usage branch cleanly.
case "$1" in
start)
     start
      ;;
stop)
     stop
      ;;
restart)
     restart
      ;;
status)
     /sbin/ifconfig
      ;;
*)
   echo "Usage: $0 {start|stop|restart|status}"
   exit 1
esac

 

 

六、测试

打开IE,访问 http://supply.test.com/MpmWeb/ 并不断刷新,能访问则说明成功。

 

1 0