Keepalived + LVS (DR) High-Availability Load-Balancing Cluster
1. Solution Overview
The goal is to build an enterprise-grade, highly available load-balancing cluster using Keepalived + LVS + Tomcat + Memcache Session Manager + Memcached, where:
- LVS: Linux Virtual Server presents multiple servers behind a single virtual IP and implements various load-balancing algorithms.
- Keepalived: built on and tightly integrated with LVS; it monitors the health of every real server behind LVS and dynamically adjusts LVS's load-balancing weights and forwarding rules based on that health state. Keepalived also provides hot standby (failover).
- Tomcat: provides the backend web service.
- Memcache Session Manager: synchronizes and replicates Tomcat sessions, and is itself highly available.
- Memcached: stores the sessions of the backend web services.
2. Environment
Four virtual machines with 512 MB of RAM each (my laptop is slow and short on memory):
- OS: Redhat Linux AS 5 (2.6.18-8.el5), 32-bit
- LVS: ipvsadm-1.24.tar.gz
- Keepalived: keepalived-1.1.17
- Tomcat: tomcat-6.0.35
- Memcache Session Manager: 1.6.0
- memcached: memcached-1.4.5.tar.gz
IP plan

Name             IP               Description
VIP              10.10.10.10/32   Virtual IP; the address external requests target
Master director  10.10.10.11/24   Keepalived + LVS master
Backup director  10.10.10.12/24   Keepalived + LVS backup
Real server 1    10.10.10.13/24   Backend real server 1 (a web server here)
Real server 2    10.10.10.14/24   Backend real server 2 (a web server here)
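These addresses are assumed to be configured statically. On RHEL 5 that is done per interface; a sketch of the master director's /etc/sysconfig/network-scripts/ifcfg-eth0 (values taken from the table above; adjust for each machine):
DEVICE=eth0
BOOTPROTO=static
IPADDR=10.10.10.11
NETMASK=255.255.255.0
ONBOOT=yes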
3. Keepalived + LVS Director Installation and Configuration
In the Keepalived + LVS scheme, both director machines need Keepalived + LVS installed and configured in a master/backup relationship, providing load balancing and high availability.
Target servers: the master and backup directors. Installation and configuration are essentially identical on both; only a few settings in keepalived.conf differ.
3.1. System Dependencies
# yum install gcc
# yum install kernel-devel
# yum install openssl-devel
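Both ipvsadm and keepalived compile against the kernel headers, so before building it is worth confirming that the installed kernel-devel matches the running kernel:
# uname -r               // running kernel; should print 2.6.18-8.el5
# ls /usr/src/kernels/   // should contain a matching 2.6.18-8.el5-i686 directory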
3.2. LVS Installation
# cd /tools
# wget http://www.linuxvirtualserver.org/software/kernel-2.6/ipvsadm-1.24.tar.gz
# ln -sv /usr/src/kernels/2.6.18-8.el5-i686/ /usr/src/linux
# tar -zxvf ipvsadm-1.24.tar.gz
# cd ipvsadm-1.24
# make;make install
After the steps above, LVS is fully installed; the resulting ipvsadm binary is in /sbin/. LVS needs no standalone configuration: from here on it is configured and managed entirely through Keepalived.
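As a quick sanity check, list the (still empty) virtual-server table; doing so also loads the ip_vs kernel module:
# ipvsadm -ln            // should print an empty virtual server table
# lsmod | grep ip_vs     // ip_vs should now be loaded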
3.3. Keepalived Installation
# cd /tools
# wget http://www.keepalived.org/software/keepalived-1.1.17.tar.gz
# tar -zxvf keepalived-1.1.17.tar.gz
# cd keepalived-1.1.17
// Note: because of the earlier ln -sv /usr/src/kernels/2.6.18-8.el5-i686/ /usr/src/linux symlink, --with-kernel-dir is omitted here; otherwise run ./configure --with-kernel-dir=/usr/src/kernels/2.6.18-8.el5-i686 so that Keepalived is built against the kernel headers.
# ./configure
……
Keepalived configuration
------------------------
Keepalived version : 1.1.17
Compiler : gcc
Compiler flags : -g -O2
Extra Lib : -lpopt -lssl -lcrypto
Use IPVS Framework : Yes
IPVS sync daemon support : Yes
IPVS use libnl : No
Use VRRP Framework : Yes
Use Debug flags : No
# make
# make install
// Copy the keepalived service script into the system init directory (so the OS can start it as a service)
# cp /usr/local/etc/rc.d/init.d/keepalived /etc/rc.d/init.d/
// Copy the keepalived startup options into the system sysconfig directory (in practice this just supplies -D)
# cp /usr/local/etc/sysconfig/keepalived /etc/sysconfig/
// Copy the keepalived binary into the system binary directory
# cp /usr/local/sbin/keepalived /usr/sbin/
// Create the keepalived configuration directory
# mkdir /etc/keepalived
// Register the keepalived service and enable it at boot
# chkconfig --add keepalived
# chkconfig --level 2345 keepalived on
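A quick check that the service was registered and enabled:
# chkconfig --list keepalived   // runlevels 2-5 should show "on"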
Create and edit the Keepalived configuration file /etc/keepalived/keepalived.conf to configure and define the virtual server (see the attachment for the complete file):
/etc/keepalived/keepalived.conf:
global_defs {
    router_id KEEPALIVED_LVS
}
vrrp_sync_group KEEPALIVED_LVS {
    group {
        KEEPALIVED_LVS_WEB
    }
}
vrrp_instance KEEPALIVED_LVS_WEB {
    state MASTER               # note: on the backup director this is BACKUP
    interface eth0
    lvs_sync_daemon_interface eth0
    garp_master_delay 5
    virtual_router_id 100
    priority 150               # note: master/backup priority; the backup is normally lower than the master
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 111111
    }
    virtual_ipaddress {
        10.10.10.10
    }
}
virtual_server 10.10.10.10 80 {
    delay_loop 3
    lb_algo wrr                # load-balancing algorithm: weighted round-robin
    lb_kind DR                 # forwarding mode: Direct Routing
    persistence_timeout 0      # session persistence in seconds; 0 disables sticky sessions
    protocol TCP
    # real server definitions
    real_server 10.10.10.13 80 {
        weight 1               # weight
        # verify real-server health with an HTTP GET
        HTTP_GET {
            url {
                # URL used to check whether the backend service is healthy
                path /checkRealServerHealth.28055dab3fc0a85271dddbeb0464bfdb
                # MD5 digest of the page content; compared against the response to decide availability
                digest 26f11e326fc7c597355f213e5677ae75
            }
            connect_timeout 3      # connection timeout
            nb_get_retry 3         # number of retries
            delay_before_retry 3   # delay before each retry
        }
    }
    real_server 10.10.10.14 80 {
        weight 1
        HTTP_GET {
            url {
                path /checkRealServerHealth.28055dab3fc0a85271dddbeb0464bfdb
                digest 26f11e326fc7c597355f213e5677ae75
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
Note: the backup director's configuration is identical to the master's except for two settings in keepalived.conf: state MASTER/BACKUP and priority 150/100.
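If the content of the health-check page ever changes, the digest above must be regenerated. Keepalived ships the genhash utility for exactly this; run it against a live real server:
# genhash -s 10.10.10.13 -p 80 -u /checkRealServerHealth.28055dab3fc0a85271dddbeb0464bfdb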
4. Backend Server Installation and Configuration
4.1. LVS Client Configuration
In LVS-DR mode the backend real servers need no extra software installed; they only need the VIP bound, a host route added, and the ARP kernel parameters set. These steps are collected in one script, lvs_realserver.sh, explained below (see the attachment for the complete file).
lvs_realserver.sh:
#!/bin/bash
# description: Configure a real server for LVS-DR
LVS_VIP=10.10.10.10
. /etc/rc.d/init.d/functions
case "$1" in
start)
    # Bind the VIP to lo:0 with a /32 mask and route it locally
    /sbin/ifconfig lo:0 $LVS_VIP netmask 255.255.255.255 broadcast $LVS_VIP
    /sbin/route add -host $LVS_VIP dev lo:0
    # Suppress ARP replies/announcements for the VIP so only the director answers ARP for it
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
    sysctl -p >/dev/null 2>&1
    echo "RealServer Start OK"
    ;;
stop)
    /sbin/ifconfig lo:0 down
    /sbin/route del $LVS_VIP >/dev/null 2>&1
    # Restore the default ARP behaviour
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
    echo "RealServer Stopped"
    ;;
*)
    echo "Usage: $0 {start|stop}"
    exit 1
esac
exit 0
Download the script from the attachment, copy it to each backend real server, and run: ./lvs_realserver.sh start|stop
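To make the DR client settings survive a reboot, one minimal approach (the install path here is only a suggestion) is an rc.local entry:
# cp lvs_realserver.sh /usr/local/sbin/ && chmod +x /usr/local/sbin/lvs_realserver.sh
# echo "/usr/local/sbin/lvs_realserver.sh start" >> /etc/rc.d/rc.local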
4.2. MSM + Tomcat Installation and Configuration
The backend servers use Memcache Session Manager + Tomcat to synchronize and replicate sessions across the cluster.
For the detailed configuration, see: APACHE (proxy_ajp_stickysession) + TOMCAT (msm_sticky) HA.
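MSM stores sessions in memcached, so each session store needs a running instance. memcached builds against libevent; a sketch of building and starting one (the cache size, port, and user below are illustrative choices, not values from this article):
# yum install libevent-devel
# tar -zxvf memcached-1.4.5.tar.gz && cd memcached-1.4.5
# ./configure && make && make install
# memcached -d -m 64 -p 11211 -u nobody   // daemonize, 64 MB cache, default port, run as nobody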
5. Verification
5.1. Starting and Stopping
Start the director service: service keepalived start
Stop the director service: service keepalived stop
Start a backend real server:
# ./lvs_realserver.sh start
then start Tomcat.
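On a backend, both steps can be wrapped into one helper; a sketch, assuming Tomcat is installed under /usr/local/tomcat (adjust the path to your layout):
#!/bin/bash
# Bring up the VIP on lo:0, then start the web service
./lvs_realserver.sh start
/usr/local/tomcat/bin/startup.sh
# Verify: lo:0 should carry the VIP and the service port should be listening
/sbin/ifconfig lo:0
netstat -lnt | grep :80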
5.2. Common Operations on the Director
View the VIP on the director:
[root@hadoop11 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
link/ether 00:0c:29:9b:12:07 brd ff:ff:ff:ff:ff:ff
inet 10.10.10.11/24 brd 10.10.10.255 scope global eth0
inet 10.10.10.10/32 scope global eth0
inet6 fe80::20c:29ff:fe9b:1207/64 scope link
valid_lft forever preferred_lft forever
3: sit0: <NOARP> mtu 1480 qdisc noop
link/sit 0.0.0.0 brd 0.0.0.0
View forwarding statistics on the director:
[root@hadoop11 ~]# ipvsadm -ln --stats
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Conns InPkts OutPkts InBytes OutBytes
-> RemoteAddress:Port
TCP 10.10.10.10:80 0 0 0 0 0
-> 10.10.10.14:80 0 0 0 0 0
-> 10.10.10.13:80 0 0 0 0 0
Check the keepalived processes:
[root@hadoop00 keepalived]# ps aux|grep keepalived
root 6635 0.0 0.1 4352 596 ? Ss 20:30 0:00 keepalived -D
root 6636 0.0 0.2 4396 1332 ? S 20:30 0:00 keepalived -D
root 6638 0.0 0.1 4396 936 ? S 20:30 0:00 keepalived -D
There should be three processes: the main daemon, plus the checker child process and the VRRP child process.
[root@hadoop00 keepalived]# pstree |grep keepalived
|-keepalived---2*[keepalived]
View the LVS kernel modules:
[root@hadoop11 ~]# lsmod|grep ip_vs
ip_vs_wrr 6977 1
ip_vs_wlc 6081 0
ip_vs 77569 6 ip_vs_wrr,ip_vs_wlc
View the system log
Because keepalived was started with the -D option, it logs verbose messages:
tail -f /var/log/messages
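To see only Keepalived's own entries, filter the same log:
# grep -i keepalived /var/log/messages | tail -n 20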
5.3. Functional Tests
Test load-balanced forwarding
Start the master and backup directors and all backend real servers, then open a browser on another machine on the network and visit the VIP.
Open http://10.10.10.10 in Firefox; one backend's page is displayed (screenshot omitted).
After forcing a refresh with Ctrl+F5, you can see the response now comes from the other backend server.
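The same alternation can be observed from the command line; a sketch that fires ten requests at the VIP (with wrr and equal weights the responses should alternate between the two backends; look for whatever marks each backend's page, such as a hostname printed by the demo app):
# for i in $(seq 1 10); do curl -s http://10.10.10.10/; done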
Test session replication
Submit a session value through the Setting Session form on the page, then refresh; after LVS forwards the request to a different backend, check whether Show All Session shows the session was synchronized and replicated.
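The check can also be scripted with curl by carrying the session cookie across requests (a sketch; the URL and the string to grep for depend on the demo application):
// the first request stores the JSESSIONID cookie
# curl -s -c /tmp/cookies.txt http://10.10.10.10/ >/dev/null
// later requests reuse it; with persistence_timeout 0 they may land on either backend,
// so the session attribute should still appear if MSM replication works
# curl -s -b /tmp/cookies.txt http://10.10.10.10/ | grep -i session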
Test backend high availability
1. Shut down one backend server (10.10.10.14), then refresh repeatedly in a browser and confirm the front end stays available.
2. Check the LVS statistics on the director:
[root@hadoop11 ~]# ipvsadm -ln --stats
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Conns InPkts OutPkts InBytes OutBytes
-> RemoteAddress:Port
TCP 10.10.10.10:80 15 95 0 11365 0
-> 10.10.10.13:80 7 42 0 4676 0
The output shows that 10.10.10.13 is currently the only backend in service. After restoring 10.10.10.14, check again:
[root@hadoop11 ~]# ipvsadm -ln --stats
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Conns InPkts OutPkts InBytes OutBytes
-> RemoteAddress:Port
TCP 10.10.10.10:80 15 95 0 11365 0
-> 10.10.10.14:80 0 0 0 0 0
-> 10.10.10.13:80 7 42 0 4676 0
The output shows the pool is back to two backend servers.
Test the directors' own high availability
1. Stop the keepalived service on the master director (10.10.10.11):
// On the master director:
[root@hadoop11 ~]# service keepalived stop
Stopping keepalived: [ OK ]
[root@hadoop11 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
link/ether 00:0c:29:9b:12:07 brd ff:ff:ff:ff:ff:ff
inet 10.10.10.11/24 brd 10.10.10.255 scope global eth0
inet6 fe80::20c:29ff:fe9b:1207/64 scope link
valid_lft forever preferred_lft forever
3: sit0: <NOARP> mtu 1480 qdisc noop
link/sit 0.0.0.0 brd 0.0.0.0
// On the backup director:
[root@hadoop12 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
link/ether 00:0c:29:e7:21:09 brd ff:ff:ff:ff:ff:ff
inet 10.10.10.12/24 brd 10.10.10.255 scope global eth0
inet 10.10.10.10/32 scope global eth0
inet6 fe80::20c:29ff:fee7:2109/64 scope link
valid_lft forever preferred_lft forever
3: sit0: <NOARP> mtu 1480 qdisc noop
link/sit 0.0.0.0 brd 0.0.0.0
The output above shows that after keepalived was shut down on the master, the VIP floated to the backup director, whose Keepalived + LVS took over scheduling for the cluster.
At this point you can of course also open the VIP in a browser to confirm the cluster is still serving.
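Failover time can be estimated from a client by pinging the VIP while keepalived is stopped on the master; the gap in replies approximates how long the backup takes to claim the VIP:
// on a client machine
# ping 10.10.10.10
// on the master director, in another terminal
# service keepalived stop
// the ping stalls briefly, then resumes once the backup announces the VIP via gratuitous ARP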
6. Common Problems and Notes
1. LVS DR mode forwards at the IP level only and cannot remap ports, so the ports in the virtual_server and real_server entries of keepalived.conf must be identical.
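The same restriction is visible if the rules are written by hand with ipvsadm rather than through Keepalived (shown for illustration only; in this setup Keepalived owns the rules). With -g (gatewaying, i.e. DR), the real-server port must equal the VIP port:
# ipvsadm -A -t 10.10.10.10:80 -s wrr
# ipvsadm -a -t 10.10.10.10:80 -r 10.10.10.13:80 -g -w 1
# ipvsadm -a -t 10.10.10.10:80 -r 10.10.10.14:80 -g -w 1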